[GFS2] Add an additional argument to gfs2_trans_add_bh()
fs/gfs2/log.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"

#define PULL 1

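/*
 * Transactions and log flushes exclude one another: lock_for_trans()
 * waits for sd_log_flush_count to drop to zero before bumping
 * sd_log_trans_count, and gfs2_lock_for_flush() does the opposite.
 * Any number of transactions (or flushes) may hold their side at once;
 * the wait queues below hand control back and forth between the two.
 */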
static inline int is_done(struct gfs2_sbd *sdp, atomic_t *a)
{
        int done;
        gfs2_log_lock(sdp);
        done = atomic_read(a) ? 0 : 1;
        gfs2_log_unlock(sdp);
        return done;
}

static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
                         atomic_t *a)
{
        gfs2_log_unlock(sdp);
        wait_event(*wq, is_done(sdp, a));
        gfs2_log_lock(sdp);
}

static void lock_for_trans(struct gfs2_sbd *sdp)
{
        gfs2_log_lock(sdp);
        do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
        atomic_inc(&sdp->sd_log_trans_count);
        gfs2_log_unlock(sdp);
}

static void unlock_from_trans(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_trans_count));
        if (atomic_dec_and_test(&sdp->sd_log_trans_count))
                wake_up(&sdp->sd_log_flush_wq);
}

void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
{
        gfs2_log_lock(sdp);
        atomic_inc(&sdp->sd_log_flush_count);
        do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
        gfs2_log_unlock(sdp);
}

void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
        if (atomic_dec_and_test(&sdp->sd_log_flush_count))
                wake_up(&sdp->sd_log_trans_wq);
}

/**
 * gfs2_struct2blk - compute number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_RU(nstruct - first, second);
        }

        return blks;
}

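/*
 * Walk the AIL1 list from the oldest entry and start writeback on each
 * transaction that has not yet been tagged with the current sync
 * generation.  Unless DIO_ALL is set, stop once the oldest entry has
 * been emptied (or is no longer at the tail of the list).
 */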
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
        struct list_head *head = &sdp->sd_ail1_list;
        uint64_t sync_gen;
        struct list_head *first, *tmp;
        struct gfs2_ail *first_ai, *ai;

        gfs2_log_lock(sdp);
        if (list_empty(head)) {
                gfs2_log_unlock(sdp);
                return;
        }
        sync_gen = sdp->sd_ail_sync_gen++;

        first = head->prev;
        first_ai = list_entry(first, struct gfs2_ail, ai_list);
        first_ai->ai_sync_gen = sync_gen;
        gfs2_ail1_start_one(sdp, first_ai);

        if (flags & DIO_ALL)
                first = NULL;

        for (;;) {
                if (first &&
                    (head->prev != first ||
                     gfs2_ail1_empty_one(sdp, first_ai, 0)))
                        break;

                for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
                        ai = list_entry(tmp, struct gfs2_ail, ai_list);
                        if (ai->ai_sync_gen >= sync_gen)
                                continue;
                        ai->ai_sync_gen = sync_gen;
                        gfs2_ail1_start_one(sdp, ai);
                        break;
                }

                if (tmp == head)
                        break;
        }

        gfs2_log_unlock(sdp);
}

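/*
 * Move any AIL1 transactions whose buffers have all been written to the
 * AIL2 list, oldest first.  Returns true if the AIL1 list is now empty.
 */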
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
        struct gfs2_ail *ai, *s;
        int ret;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                if (gfs2_ail1_empty_one(sdp, ai, flags))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else if (!(flags & DIO_ALL))
                        break;
        }

        ret = list_empty(&sdp->sd_ail1_list);

        gfs2_log_unlock(sdp);

        return ret;
}

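/*
 * Free the AIL2 transactions that fall between the old log tail and the
 * new one, taking into account that the tail may have wrapped around
 * the end of the journal.
 */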
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        LIST_HEAD(list);
        unsigned int try = 0;

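        /*
         * Reservations are granted in FIFO order: each waiter queues a
         * stack-local list_head on sd_log_blks_list and sleeps until it
         * reaches the front of that queue and enough free blocks exist.
         */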
        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;

        for (;;) {
                gfs2_log_lock(sdp);

                if (list_empty(&list)) {
                        list_add_tail(&list, &sdp->sd_log_blks_list);
                        while (sdp->sd_log_blks_list.next != &list) {
                                DECLARE_WAITQUEUE(__wait_chan, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&sdp->sd_log_blks_wait,
                                               &__wait_chan);
                                gfs2_log_unlock(sdp);
                                schedule();
                                gfs2_log_lock(sdp);
                                remove_wait_queue(&sdp->sd_log_blks_wait,
                                                  &__wait_chan);
                                set_current_state(TASK_RUNNING);
                        }
                }

                /* Never give away the last block so we can
                   always pull the tail if we need to. */
                if (sdp->sd_log_blks_free > blks) {
                        sdp->sd_log_blks_free -= blks;
                        list_del(&list);
                        gfs2_log_unlock(sdp);
                        wake_up(&sdp->sd_log_blks_wait);
                        break;
                }

                gfs2_log_unlock(sdp);

                gfs2_ail1_empty(sdp, 0);
                gfs2_log_flush(sdp);

                if (try++)
                        gfs2_ail1_start(sdp, 0);
        }

        lock_for_trans(sdp);

        return 0;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        unlock_from_trans(sdp);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += blks;
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);
}

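/*
 * Translate a journal-relative block number into an on-disk block
 * number by mapping through the journal inode.
 */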
static uint64_t log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
        int new = 0;
        uint64_t dbn;
        int error;

        error = gfs2_block_map(sdp->sd_jdesc->jd_inode, lbn, &new, &dbn, NULL);
        gfs2_assert_withdraw(sdp, !error && dbn);

        return dbn;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp,
                                        unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}

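/*
 * The log tail is pinned by the oldest transaction still on the AIL1
 * list; if nothing is outstanding, the tail is simply the current log
 * head.
 */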
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        gfs2_log_lock(sdp);

        if (list_empty(&sdp->sd_ail1_list))
                tail = sdp->sd_log_head;
        else {
                ai = list_entry(sdp->sd_ail1_list.prev,
                                struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        gfs2_log_unlock(sdp);

        return tail;
}

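/*
 * Advance the flush head by one block, wrapping back to the start of
 * the journal when the end is reached.  The flush head may only catch
 * up with the tail while the log is empty (head == tail).
 */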
static inline void log_incr_head(struct gfs2_sbd *sdp)
{
        if (sdp->sd_log_flush_head == sdp->sd_log_tail)
                gfs2_assert_withdraw(sdp,
                                sdp->sd_log_flush_head == sdp->sd_log_head);

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_KERNEL | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);

        bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        log_incr_head(sdp);

        return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head whose data is to be written to the log
 *
 * Returns: the fake buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                      struct buffer_head *real)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_KERNEL | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);
        lb->lb_real = real;

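        /*
         * The fake buffer_head points at the real buffer's data page,
         * so the metadata block is written to its journal location
         * without being copied.
         */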
        bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
        set_bh_page(bh, virt_to_page(real->b_data),
                    ((unsigned long)real->b_data) & (PAGE_SIZE - 1));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;

        log_incr_head(sdp);

        return bh;
}

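/*
 * Move the log tail forward to new_tail, freeing the blocks in between.
 * When the move was forced by a header write ("pull"), one block is
 * kept back to account for that header.
 */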
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += dist - ((pull) ? 1 : 0);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);

        sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if the header is being written to pull the log tail
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, uint32_t flags, int pull)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;
        struct gfs2_log_header *lh;
        unsigned int tail;
        uint32_t hash;

        atomic_inc(&sdp->sd_log_flush_ondisk);

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        gfs2_ail1_empty(sdp, 0);
        tail = current_tail(sdp);

        lh = (struct gfs2_log_header *)bh->b_data;
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be16(GFS2_METATYPE_LH);
        lh->lh_header.mh_format = cpu_to_be16(GFS2_FORMAT_LH);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        set_buffer_dirty(bh);
        if (sync_dirty_buffer(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail, pull);
        else
                gfs2_assert_withdraw(sdp, !pull);

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        log_incr_head(sdp);
}

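/*
 * Wait for every buffer queued on sd_log_flush_list to hit the disk,
 * then write the header that commits them.
 */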
static void log_flush_commit(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_log_flush_list;
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;
        unsigned int d;

        d = log_distance(sdp, sdp->sd_log_flush_head, sdp->sd_log_head);

        gfs2_assert_withdraw(sdp, d + 1 == sdp->sd_log_blks_reserved);

        while (!list_empty(head)) {
                lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
                list_del(&lb->lb_list);
                bh = lb->lb_bh;

                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                if (lb->lb_real) {
                        while (atomic_read(&bh->b_count) != 1)  /* Grrrr... */
                                schedule();
                        free_buffer_head(bh);
                } else
                        brelse(bh);
                kfree(lb);
        }

        log_write_header(sdp, 0, 0);
}

/**
 * gfs2_log_flush_i - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        atomic_inc(&sdp->sd_log_flush_incore);

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);

        gfs2_lock_for_flush(sdp);
        down(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

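        /* Nothing to do if the glock has no committed log entries. */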
        if (gl && list_empty(&gl->gl_le.le_list)) {
                up(&sdp->sd_log_flush_lock);
                gfs2_unlock_from_flush(sdp);
                kfree(ai);
                return;
        }

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

        lops_before_commit(sdp);
        if (!list_empty(&sdp->sd_log_flush_list))
                log_flush_commit(sdp);
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
                log_write_header(sdp, 0, PULL);
        lops_after_commit(sdp, ai);

        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;

        sdp->sd_log_blks_reserved =
                sdp->sd_log_commited_buf =
                sdp->sd_log_commited_revoke = 0;

        gfs2_log_lock(sdp);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        gfs2_log_unlock(sdp);

        up(&sdp->sd_log_flush_lock);
        sdp->sd_vfs->s_dirt = 0;
        gfs2_unlock_from_flush(sdp);

        kfree(ai);
}

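/*
 * Recalculate the number of log blocks this transaction really needs
 * and return the difference to the pool of free blocks.  The divisor
 * of 503 presumably reflects how many buffer pointers fit in a single
 * log descriptor block for the default 4096-byte block size.
 */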
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved = 1;
        unsigned int old;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

        if (sdp->sd_log_commited_buf)
                reserved += 1 + sdp->sd_log_commited_buf + sdp->sd_log_commited_buf/503;
        if (sdp->sd_log_commited_revoke)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                            sizeof(uint64_t));

        old = sdp->sd_log_blks_free;
        sdp->sd_log_blks_free += tr->tr_reserved -
                                 (reserved - sdp->sd_log_blks_reserved);

        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free >= old);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        lops_incore_commit(sdp, tr);

        sdp->sd_vfs->s_dirt = 1;
        unlock_from_trans(sdp);

        kfree(tr);

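        /* Flush the incore log if it has grown past the tunable limit. */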
        gfs2_log_lock(sdp);
        if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
                gfs2_log_unlock(sdp);
                gfs2_log_flush(sdp);
        } else
                gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        down(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !atomic_read(&sdp->sd_log_trans_count));
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free ==
                             sdp->sd_jdesc->jd_blocks);
        gfs2_assert_withdraw(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;
        sdp->sd_log_tail = sdp->sd_log_head;

        up(&sdp->sd_log_flush_lock);
}