/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
        return NILFS_MDT(cpfile)->mi_entries_per_block;
}
/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
        __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

        do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
        return (unsigned long)tcno;
}
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
        __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

        return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}
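
/*
 * Illustrative example (values are hypothetical): with
 * mi_first_entry_offset == 2 and 8 checkpoints per block, cno = 13
 * gives tcno = 13 + 2 - 1 = 14, hence block offset 14 / 8 = 1 and
 * in-block slot 14 % 8 = 6.  do_div() divides in place and returns
 * the remainder, which is why the two helpers share the same
 * expression.
 */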
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
                                  __u64 curr,
                                  __u64 max)
{
        return min_t(__u64,
                     nilfs_cpfile_checkpoints_per_block(cpfile) -
                     nilfs_cpfile_get_offset(cpfile, curr),
                     max - curr);
}
static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
                                           __u64 cno)
{
        return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
                                         struct buffer_head *bh,
                                         void *kaddr,
                                         unsigned int n)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        unsigned int count;

        count = le32_to_cpu(cp->cp_checkpoints_count) + n;
        cp->cp_checkpoints_count = cpu_to_le32(count);
        return count;
}
static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
                                         struct buffer_head *bh,
                                         void *kaddr,
                                         unsigned int n)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        unsigned int count;

        WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
        count = le32_to_cpu(cp->cp_checkpoints_count) - n;
        cp->cp_checkpoints_count = cpu_to_le32(count);
        return count;
}
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
                              struct buffer_head *bh,
                              void *kaddr)
{
        return kaddr + bh_offset(bh);
}
static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
                                  struct buffer_head *bh,
                                  void *kaddr)
{
        return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
                NILFS_MDT(cpfile)->mi_entry_size;
}
static void nilfs_cpfile_block_init(struct inode *cpfile,
                                    struct buffer_head *bh,
                                    void *kaddr)
{
        struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        int n = nilfs_cpfile_checkpoints_per_block(cpfile);

        while (n-- > 0) {
                nilfs_checkpoint_set_invalid(cp);
                cp = (void *)cp + cpsz;
        }
}
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
                                                    __u64 cno,
                                                    int create,
                                                    struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(cpfile,
                                   nilfs_cpfile_get_blkoff(cpfile, cno),
                                   create, nilfs_cpfile_block_init, bhp);
}
static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
                                                       __u64 cno)
{
        return nilfs_mdt_delete_block(cpfile,
                                      nilfs_cpfile_get_blkoff(cpfile, cno));
}
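
/*
 * Note (inferred from the helpers above): block 0 of the cpfile holds
 * the header, checkpoint blocks are created on demand through
 * nilfs_mdt_get_block() with nilfs_cpfile_block_init() as the
 * initializer, and a block is released again with
 * nilfs_mdt_delete_block() once it no longer holds valid checkpoints.
 */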
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed to by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
                                __u64 cno,
                                int create,
                                struct nilfs_checkpoint **cpp,
                                struct buffer_head **bhp)
{
        struct buffer_head *header_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        void *kaddr;
        int ret;

        if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
                     (cno < nilfs_mdt_cno(cpfile) && create)))
                return -EINVAL;

        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_sem;
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
        if (ret < 0)
                goto out_header;
        kaddr = kmap(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                if (!create) {
                        kunmap(cp_bh->b_page);
                        brelse(cp_bh);
                        ret = -ENOENT;
                        goto out_header;
                }
                /* a newly-created checkpoint */
                nilfs_checkpoint_clear_invalid(cp);
                if (!nilfs_cpfile_is_in_first(cpfile, cno))
                        nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
                                                                 kaddr, 1);
                nilfs_mdt_mark_buffer_dirty(cp_bh);

                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, 1);
                kunmap_atomic(kaddr, KM_USER0);
                nilfs_mdt_mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
        }

        if (cpp != NULL)
                *cpp = cp;
        *bhp = cp_bh;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
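
/*
 * Usage sketch (illustrative, not from the original file): a writer
 * updating the current checkpoint pairs the two helpers, e.g. with a
 * hypothetical inodes_count value to record:
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_cpfile_get_checkpoint(cpfile, cno, 1, &cp, &bh);
 *	if (!err) {
 *		cp->cp_inodes_count = cpu_to_le64(inodes_count);
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 *
 * The checkpoint block stays kmap()ed between the two calls, so they
 * must be made on the same buffer head.
 */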
/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
                                 struct buffer_head *bh)
{
        kunmap(bh->b_page);
        brelse(bh);
}
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. Checkpoints which
 * have already been deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                                    __u64 start,
                                    __u64 end)
{
        struct buffer_head *header_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        __u64 cno;
        void *kaddr;
        unsigned long tnicps;
        int ret, ncps, nicps, count, i;

        if (unlikely(start == 0 || start > end)) {
                printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
                       "[%llu, %llu)\n", __func__,
                       (unsigned long long)start, (unsigned long long)end);
                return -EINVAL;
        }

        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_sem;
        tnicps = 0;

        for (cno = start; cno < end; cno += ncps) {
                ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                break;
                        /* skip hole */
                        ret = 0;
                        continue;
                }

                kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
                cp = nilfs_cpfile_block_get_checkpoint(
                        cpfile, cno, cp_bh, kaddr);
                nicps = 0;
                for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
                        WARN_ON(nilfs_checkpoint_snapshot(cp));
                        if (!nilfs_checkpoint_invalid(cp)) {
                                nilfs_checkpoint_set_invalid(cp);
                                nicps++;
                        }
                }
                if (nicps > 0) {
                        tnicps += nicps;
                        nilfs_mdt_mark_buffer_dirty(cp_bh);
                        nilfs_mdt_mark_dirty(cpfile);
                        if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
                            (count = nilfs_cpfile_block_sub_valid_checkpoints(
                                        cpfile, cp_bh, kaddr, nicps)) == 0) {
                                /* make hole */
                                kunmap_atomic(kaddr, KM_USER0);
                                brelse(cp_bh);
                                ret = nilfs_cpfile_delete_checkpoint_block(
                                                                   cpfile, cno);
                                if (ret == 0)
                                        continue;
                                printk(KERN_ERR "%s: cannot delete block\n",
                                       __func__);
                                break;
                        }
                }

                kunmap_atomic(kaddr, KM_USER0);
                brelse(cp_bh);
        }

        if (tnicps > 0) {
                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
                nilfs_mdt_mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
                kunmap_atomic(kaddr, KM_USER0);
        }

        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
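
/*
 * Note (illustrative): the range is half open, so for example
 * nilfs_cpfile_delete_checkpoints(cpfile, 3, 7) invalidates checkpoints
 * 3, 4, 5 and 6 and leaves checkpoint 7 untouched.  A block whose last
 * valid checkpoint is removed is turned back into a hole.
 */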
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
                                              struct nilfs_checkpoint *cp,
                                              struct nilfs_cpinfo *ci)
{
        ci->ci_flags = le32_to_cpu(cp->cp_flags);
        ci->ci_cno = le64_to_cpu(cp->cp_cno);
        ci->ci_create = le64_to_cpu(cp->cp_create);
        ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
        ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
        ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
        ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
                                          void *buf, unsigned cisz, size_t nci)
{
        struct nilfs_checkpoint *cp;
        struct nilfs_cpinfo *ci = buf;
        struct buffer_head *bh;
        size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
        __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
        void *kaddr;
        int n, ret;
        int ncps, i;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_read(&NILFS_MDT(cpfile)->mi_sem);

        for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
                ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        continue; /* skip hole */
                }

                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
                for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
                        if (!nilfs_checkpoint_invalid(cp)) {
                                nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
                                                                  ci);
                                ci = (void *)ci + cisz;
                                n++;
                        }
                }
                kunmap_atomic(kaddr, KM_USER0);
                brelse(bh);
        }

        ret = n;
        if (n > 0) {
                ci = (void *)ci - cisz;
                *cnop = ci->ci_cno + 1;
        }

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
                                          void *buf, unsigned cisz, size_t nci)
{
        struct buffer_head *bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_cpinfo *ci = buf;
        __u64 curr = *cnop, next;
        unsigned long curr_blkoff, next_blkoff;
        void *kaddr;
        int n = 0, ret;

        down_read(&NILFS_MDT(cpfile)->mi_sem);

        if (curr == 0) {
                ret = nilfs_cpfile_get_header_block(cpfile, &bh);
                if (ret < 0)
                        goto out;
                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
                curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
                kunmap_atomic(kaddr, KM_USER0);
                brelse(bh);
                if (curr == 0) {
                        ret = 0;
                        goto out;
                }
        } else if (unlikely(curr == ~(__u64)0)) {
                ret = 0;
                goto out;
        }

        curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
        ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
        if (unlikely(ret < 0)) {
                if (ret == -ENOENT)
                        ret = 0; /* No snapshots (started from a hole block) */
                goto out;
        }
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        while (n < nci) {
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
                curr = ~(__u64)0; /* Terminator */
                if (unlikely(nilfs_checkpoint_invalid(cp) ||
                             !nilfs_checkpoint_snapshot(cp)))
                        break;
                nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
                ci = (void *)ci + cisz;
                n++;
                next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
                if (next == 0)
                        break; /* reach end of the snapshot list */

                next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
                if (curr_blkoff != next_blkoff) {
                        kunmap_atomic(kaddr, KM_USER0);
                        brelse(bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
                                                                0, &bh);
                        if (unlikely(ret < 0)) {
                                WARN_ON(ret == -ENOENT);
                                goto out;
                        }
                        kaddr = kmap_atomic(bh->b_page, KM_USER0);
                }
                curr = next;
                curr_blkoff = next_blkoff;
        }
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);
        *cnop = curr;
        ret = n;

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
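
/*
 * Note (inferred from the traversal above): snapshots are chained in a
 * doubly linked list through cp_snapshot_list, with ch_snapshot_list in
 * the cpfile header serving as the list head and an ssl_next value of 0
 * marking the end.  *cnop is updated so that a later call resumes where
 * this one stopped, and becomes ~0 once the whole list has been walked.
 */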
/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass a starting checkpoint number and receive the
 *        number from which to continue the search
 * @mode: kind of checkpoints to collect (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
 * @buf: buffer to receive the checkpoint information
 * @cisz: byte size of one checkpoint info item
 * @nci: maximum number of items to collect
 *
 * Return Value: On success, the number of stored items is returned. On
 * error, a negative error code is returned.
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
                                void *buf, unsigned cisz, size_t nci)
{
        switch (mode) {
        case NILFS_CHECKPOINT:
                return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
        case NILFS_SNAPSHOT:
                return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
        default:
                return -EINVAL;
        }
}
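
/*
 * Usage sketch (illustrative, not from the original source): callers can
 * scan all checkpoints in batches by feeding the updated *cnop back in.
 * With a hypothetical caller-provided array cibuf[] of NCI entries and a
 * hypothetical process() helper:
 *
 *	__u64 cno = 1;
 *	ssize_t n;
 *
 *	while ((n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
 *					    cibuf, sizeof(cibuf[0]),
 *					    NCI)) > 0)
 *		process(cibuf, n);
 *
 * Each call advances cno past the last item returned.  A snapshot scan
 * works the same way with NILFS_SNAPSHOT, starting from cno == 0 to
 * begin at the head of the snapshot list.
 */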
/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
        struct nilfs_cpinfo ci;
        __u64 tcno = cno;
        ssize_t nci;

        nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
        if (nci < 0)
                return nci;
        else if (nci == 0 || ci.ci_cno != cno)
                return -ENOENT;
        else if (nilfs_cpinfo_snapshot(&ci))
                return -EBUSY;

        return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
                                     __u64 cno,
                                     struct buffer_head *bh,
                                     void *kaddr)
{
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;

        if (cno != 0) {
                cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
                list = &cp->cp_snapshot_list;
        } else {
                header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
                list = &header->ch_snapshot_list;
        }
        return list;
}
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;
        __u64 curr, prev;
        unsigned long curr_blkoff, prev_blkoff;
        void *kaddr;
        int ret;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        if (nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        kunmap_atomic(kaddr, KM_USER0);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_cp;
        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        list = &header->ch_snapshot_list;
        curr_bh = header_bh;
        get_bh(curr_bh);
        curr = 0;
        curr_blkoff = 0;
        prev = le64_to_cpu(list->ssl_prev);
        while (prev > cno) {
                prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
                curr = prev;
                if (curr_blkoff != prev_blkoff) {
                        kunmap_atomic(kaddr, KM_USER0);
                        brelse(curr_bh);
                        ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
                                                                0, &curr_bh);
                        if (ret < 0)
                                goto out_header;
                        kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
                }
                curr_blkoff = prev_blkoff;
                cp = nilfs_cpfile_block_get_checkpoint(
                        cpfile, curr, curr_bh, kaddr);
                list = &cp->cp_snapshot_list;
                prev = le64_to_cpu(list->ssl_prev);
        }
        kunmap_atomic(kaddr, KM_USER0);

        if (prev != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
                                                        &prev_bh);
                if (ret < 0)
                        goto out_curr;
        } else {
                prev_bh = header_bh;
                get_bh(prev_bh);
        }

        kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, curr, curr_bh, kaddr);
        list->ssl_prev = cpu_to_le64(cno);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
        nilfs_checkpoint_set_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(cno);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, 1);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(prev_bh);
        nilfs_mdt_mark_buffer_dirty(curr_bh);
        nilfs_mdt_mark_buffer_dirty(cp_bh);
        nilfs_mdt_mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);

        brelse(prev_bh);

 out_curr:
        brelse(curr_bh);

 out_header:
        brelse(header_bh);

 out_cp:
        brelse(cp_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
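
/*
 * Note (inferred from the insertion walk above): the snapshot list is
 * kept sorted by checkpoint number along ssl_next.  The walk follows
 * ssl_prev from the header (the largest snapshot) down to the first
 * entry whose number is smaller than @cno, and the new snapshot is
 * spliced in between that entry (prev) and its successor (curr), either
 * of which may be the header itself.
 */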
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
        struct nilfs_cpfile_header *header;
        struct nilfs_checkpoint *cp;
        struct nilfs_snapshot_list *list;
        __u64 next, prev;
        void *kaddr;
        int ret;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_write(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }
        if (!nilfs_checkpoint_snapshot(cp)) {
                ret = 0;
                kunmap_atomic(kaddr, KM_USER0);
                goto out_cp;
        }

        list = &cp->cp_snapshot_list;
        next = le64_to_cpu(list->ssl_next);
        prev = le64_to_cpu(list->ssl_prev);
        kunmap_atomic(kaddr, KM_USER0);

        ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
        if (ret < 0)
                goto out_cp;
        if (next != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
                                                        &next_bh);
                if (ret < 0)
                        goto out_header;
        } else {
                next_bh = header_bh;
                get_bh(next_bh);
        }
        if (prev != 0) {
                ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
                                                        &prev_bh);
                if (ret < 0)
                        goto out_next;
        } else {
                prev_bh = header_bh;
                get_bh(prev_bh);
        }

        kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, next, next_bh, kaddr);
        list->ssl_prev = cpu_to_le64(prev);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
        list = nilfs_cpfile_block_get_snapshot_list(
                cpfile, prev, prev_bh, kaddr);
        list->ssl_next = cpu_to_le64(next);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
        cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
        nilfs_checkpoint_clear_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);

        kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
        le64_add_cpu(&header->ch_nsnapshots, -1);
        kunmap_atomic(kaddr, KM_USER0);

        nilfs_mdt_mark_buffer_dirty(next_bh);
        nilfs_mdt_mark_buffer_dirty(prev_bh);
        nilfs_mdt_mark_buffer_dirty(cp_bh);
        nilfs_mdt_mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);

        brelse(prev_bh);

 out_next:
        brelse(next_bh);

 out_header:
        brelse(header_bh);

 out_cp:
        brelse(cp_bh);

 out_sem:
        up_write(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
/**
 * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() checks whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
        struct buffer_head *bh;
        struct nilfs_checkpoint *cp;
        void *kaddr;
        int ret;

        if (cno == 0)
                return -ENOENT; /* checkpoint number 0 is invalid */
        down_read(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
        if (ret < 0)
                goto out;
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
        ret = nilfs_checkpoint_snapshot(cp);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);

 out:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
        struct the_nilfs *nilfs;
        int ret;

        nilfs = NILFS_MDT(cpfile)->mi_nilfs;

        switch (mode) {
        case NILFS_CHECKPOINT:
                /*
                 * Check for protecting existing snapshot mounts:
                 * ns_mount_mutex is used to make this operation atomic and
                 * exclusive with a new mount job. Though it doesn't cover
                 * umount, it's enough for the purpose.
                 */
                mutex_lock(&nilfs->ns_mount_mutex);
                if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
                        /*
                         * Current implementation does not have to protect
                         * plain read-only mounts since they are exclusive
                         * with a read/write mount and are protected from the
                         * cleaner.
                         */
                        ret = -EBUSY;
                } else
                        ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
                mutex_unlock(&nilfs->ns_mount_mutex);
                return ret;
        case NILFS_SNAPSHOT:
                return nilfs_cpfile_set_snapshot(cpfile, cno);
        default:
                return -EINVAL;
        }
}
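
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	err = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT);
 *	...
 *	err = nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_CHECKPOINT);
 *
 * The first call turns checkpoint cno into a snapshot; the second turns
 * it back, and fails with -EBUSY while the snapshot is mounted.
 */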
/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoint information is
 * stored in the place pointed to by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
        struct buffer_head *bh;
        struct nilfs_cpfile_header *header;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(cpfile)->mi_sem);

        ret = nilfs_cpfile_get_header_block(cpfile, &bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(bh->b_page, KM_USER0);
        header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
        cpstat->cs_cno = nilfs_mdt_cno(cpfile);
        cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
        cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh);

 out_sem:
        up_read(&NILFS_MDT(cpfile)->mi_sem);
        return ret;
}
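
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	struct nilfs_cpstat cpstat;
 *
 *	if (!nilfs_cpfile_get_stat(cpfile, &cpstat))
 *		printk(KERN_DEBUG "cno=%llu ncps=%llu nsss=%llu\n",
 *		       (unsigned long long)cpstat.cs_cno,
 *		       (unsigned long long)cpstat.cs_ncps,
 *		       (unsigned long long)cpstat.cs_nsss);
 */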