2 * sufile.c - NILFS segment usage file.
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Koji Sato <koji@osrg.net>.
23 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/buffer_head.h>
27 #include <linux/errno.h>
28 #include <linux/nilfs2_fs.h>
/* Number of segment usage entries that fit in one sufile block (from MDT). */
33 static inline unsigned long
34 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
36 return NILFS_MDT(sufile)->mi_entries_per_block;
/*
 * nilfs_sufile_get_blkoff - sufile block number that holds the usage
 * entry of @segnum.  The entry index is biased by mi_first_entry_offset
 * (the header occupies the first slots) before dividing by the
 * per-block entry count.
 */
40 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
42 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
/* do_div() divides t in place; the remainder is discarded here */
43 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
44 return (unsigned long)t;
/*
 * nilfs_sufile_get_offset - index of @segnum's usage entry within its
 * sufile block; do_div() returns the remainder of the biased entry
 * number divided by the per-block entry count.
 */
48 nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
50 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
51 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
/*
 * nilfs_sufile_segment_usages_in_block - number of usage entries that can
 * be scanned starting at @curr without leaving @curr's sufile block,
 * capped by the caller-supplied upper bound.
 */
55 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
58 return min_t(unsigned long,
59 nilfs_sufile_segment_usages_per_block(sufile) -
60 nilfs_sufile_get_offset(sufile, curr),
/*
 * nilfs_sufile_block_get_header - locate the sufile header structure at
 * the start of the header block.  @kaddr is the caller's kernel mapping
 * of @bh's page; bh_offset() gives the buffer's offset within that page.
 */
64 static inline struct nilfs_sufile_header *
65 nilfs_sufile_block_get_header(const struct inode *sufile,
66 struct buffer_head *bh,
69 return kaddr + bh_offset(bh);
/*
 * nilfs_sufile_block_get_segment_usage - locate @segnum's usage entry
 * inside the mapped block @bh/@kaddr by scaling the in-block entry index
 * with the on-disk entry size.
 */
72 static struct nilfs_segment_usage *
73 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
74 struct buffer_head *bh, void *kaddr)
76 return kaddr + bh_offset(bh) +
77 nilfs_sufile_get_offset(sufile, segnum) *
78 NILFS_MDT(sufile)->mi_entry_size;
/* Read (never create: create flag is 0) the sufile header, block 0. */
81 static inline int nilfs_sufile_get_header_block(struct inode *sufile,
82 struct buffer_head **bhp)
84 return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
/*
 * nilfs_sufile_get_segment_usage_block - read the sufile block holding
 * @segnum's usage entry, optionally creating it when @create is nonzero.
 */
88 nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
89 int create, struct buffer_head **bhp)
91 return nilfs_mdt_get_block(sufile,
92 nilfs_sufile_get_blkoff(sufile, segnum),
97 * nilfs_sufile_alloc - allocate a segment
98 * @sufile: inode of segment usage file
99 * @segnump: pointer to segment number
101 * Description: nilfs_sufile_alloc() allocates a clean segment.
103 * Return Value: On success, 0 is returned and the segment number of the
104 * allocated segment is stored in the place pointed by @segnump. On error, one
105 * of the following negative error codes is returned.
109 * %-ENOMEM - Insufficient amount of memory available.
111 * %-ENOSPC - No clean segment left.
113 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
115 struct buffer_head *header_bh, *su_bh;
116 struct the_nilfs *nilfs;
117 struct nilfs_sufile_header *header;
118 struct nilfs_segment_usage *su;
119 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
120 __u64 segnum, maxsegnum, last_alloc;
122 unsigned long nsegments, ncleansegs, nsus;
/* serialize against all other sufile header/entry updates */
125 down_write(&NILFS_MDT(sufile)->mi_sem);
127 nilfs = NILFS_MDT(sufile)->mi_nilfs;
129 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
/* snapshot clean-segment count and last allocation point from header */
132 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
133 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
134 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
135 last_alloc = le64_to_cpu(header->sh_last_alloc);
136 kunmap_atomic(kaddr, KM_USER0);
/*
 * Scan for a clean segment, starting just after the previous
 * allocation and covering at most nsegments entries in total.
 */
138 nsegments = nilfs_sufile_get_nsegments(sufile);
139 segnum = last_alloc + 1;
140 maxsegnum = nsegments - 1;
141 for (i = 0; i < nsegments; i += nsus) {
/* wrapped past the end: restart at segment 0 and stop at last_alloc */
142 if (segnum >= nsegments) {
145 maxsegnum = last_alloc;
147 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
151 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
152 su = nilfs_sufile_block_get_segment_usage(
153 sufile, segnum, su_bh, kaddr);
/* walk the usage entries that live in this one block */
155 nsus = nilfs_sufile_segment_usages_in_block(
156 sufile, segnum, maxsegnum);
157 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
158 if (!nilfs_segment_usage_clean(su))
160 /* found a clean segment */
161 nilfs_segment_usage_set_dirty(su);
162 kunmap_atomic(kaddr, KM_USER0);
/* account the transition clean->dirty and remember the winner */
164 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
165 header = nilfs_sufile_block_get_header(
166 sufile, header_bh, kaddr);
167 le64_add_cpu(&header->sh_ncleansegs, -1);
168 le64_add_cpu(&header->sh_ndirtysegs, 1);
169 header->sh_last_alloc = cpu_to_le64(segnum);
170 kunmap_atomic(kaddr, KM_USER0);
172 nilfs_mdt_mark_buffer_dirty(header_bh);
173 nilfs_mdt_mark_buffer_dirty(su_bh);
174 nilfs_mdt_mark_dirty(sufile);
/* no clean entry in this block: unmap and try the next block */
180 kunmap_atomic(kaddr, KM_USER0);
184 /* no segments left */
191 up_write(&NILFS_MDT(sufile)->mi_sem);
196 * nilfs_sufile_cancel_free - cancel the freeing of a segment, marking its usage entry dirty (in use) again
197 * @sufile: inode of segment usage file
198 * @segnum: segment number
202 * Return Value: On success, 0 is returned. On error, one of the following
203 * negative error codes is returned.
207 * %-ENOMEM - Insufficient amount of memory available.
209 int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
211 struct buffer_head *header_bh, *su_bh;
212 struct the_nilfs *nilfs;
213 struct nilfs_sufile_header *header;
214 struct nilfs_segment_usage *su;
218 down_write(&NILFS_MDT(sufile)->mi_sem);
220 nilfs = NILFS_MDT(sufile)->mi_nilfs;
222 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
/* create flag is 0: the block must already exist for a freed segment */
226 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
230 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
231 su = nilfs_sufile_block_get_segment_usage(
232 sufile, segnum, su_bh, kaddr);
/* only a clean (freed) segment can have its free cancelled */
233 if (unlikely(!nilfs_segment_usage_clean(su))) {
234 printk(KERN_WARNING "%s: segment %llu must be clean\n",
235 __func__, (unsigned long long)segnum);
236 kunmap_atomic(kaddr, KM_USER0);
/* flip the entry back to dirty (in use) */
239 nilfs_segment_usage_set_dirty(su);
240 kunmap_atomic(kaddr, KM_USER0);
/* keep the header counters consistent with the clean->dirty change */
242 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
243 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
244 le64_add_cpu(&header->sh_ncleansegs, -1);
245 le64_add_cpu(&header->sh_ndirtysegs, 1);
246 kunmap_atomic(kaddr, KM_USER0);
248 nilfs_mdt_mark_buffer_dirty(header_bh);
249 nilfs_mdt_mark_buffer_dirty(su_bh);
250 nilfs_mdt_mark_dirty(sufile);
257 up_write(&NILFS_MDT(sufile)->mi_sem);
262 * nilfs_sufile_freev - free segments
263 * @sufile: inode of segment usage file
264 * @segnum: array of segment numbers
265 * @nsegs: number of segments
267 * Description: nilfs_sufile_freev() frees segments specified by @segnum and
268 * @nsegs, which must have been returned by a previous call to
269 * nilfs_sufile_alloc().
271 * Return Value: On success, 0 is returned. On error, one of the following
272 * negative error codes is returned.
276 * %-ENOMEM - Insufficient amount of memory available.
278 #define NILFS_SUFILE_FREEV_PREALLOC 16
279 int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
281 struct buffer_head *header_bh, **su_bh,
282 *su_bh_prealloc[NILFS_SUFILE_FREEV_PREALLOC];
283 struct the_nilfs *nilfs;
284 struct nilfs_sufile_header *header;
285 struct nilfs_segment_usage *su;
289 down_write(&NILFS_MDT(sufile)->mi_sem);
291 nilfs = NILFS_MDT(sufile)->mi_nilfs;
293 /* prepare resources */
/* small batches use the on-stack array and avoid a kmalloc */
294 if (nsegs <= NILFS_SUFILE_FREEV_PREALLOC)
295 su_bh = su_bh_prealloc;
297 su_bh = kmalloc(sizeof(*su_bh) * nsegs, GFP_NOFS);
304 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
/*
 * Fetch every usage block first, so a read failure is detected
 * before any on-disk state has been modified.
 */
307 for (i = 0; i < nsegs; i++) {
308 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum[i],
/* all buffers in hand: mark each segment's usage entry clean */
315 for (i = 0; i < nsegs; i++) {
316 kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
317 su = nilfs_sufile_block_get_segment_usage(
318 sufile, segnum[i], su_bh[i], kaddr);
/* freeing an error-marked segment indicates a bug in the caller */
319 WARN_ON(nilfs_segment_usage_error(su));
320 nilfs_segment_usage_set_clean(su);
321 kunmap_atomic(kaddr, KM_USER0);
322 nilfs_mdt_mark_buffer_dirty(su_bh[i]);
/* move nsegs segments from the dirty count to the clean count */
324 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
325 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
326 le64_add_cpu(&header->sh_ncleansegs, nsegs);
327 le64_add_cpu(&header->sh_ndirtysegs, -(u64)nsegs);
328 kunmap_atomic(kaddr, KM_USER0);
329 nilfs_mdt_mark_buffer_dirty(header_bh);
330 nilfs_mdt_mark_dirty(sufile);
/* release the usage buffers acquired above (i counts those obtained) */
333 for (i--; i >= 0; i--)
/* free the buffer-head array only if it was heap-allocated */
338 if (su_bh != su_bh_prealloc)
342 up_write(&NILFS_MDT(sufile)->mi_sem);
347 * nilfs_sufile_free - free a single segment
/* Free a single segment; convenience wrapper around nilfs_sufile_freev(). */
351 int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
353 return nilfs_sufile_freev(sufile, &segnum, 1);
357 * nilfs_sufile_get_segment_usage - get a segment usage
358 * @sufile: inode of segment usage file
359 * @segnum: segment number
360 * @sup: pointer to segment usage
361 * @bhp: pointer to buffer head
363 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
364 * specified by @segnum.
366 * Return Value: On success, 0 is returned, and the segment usage and the
367 * buffer head of the buffer on which the segment usage is located are stored
368 * in the place pointed by @sup and @bhp, respectively. On error, one of the
369 * following negative error codes is returned.
373 * %-ENOMEM - Insufficient amount of memory available.
375 * %-EINVAL - Invalid segment usage number.
377 int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
378 struct nilfs_segment_usage **sup,
379 struct buffer_head **bhp)
381 struct buffer_head *bh;
382 struct nilfs_segment_usage *su;
386 /* segnum is 0 origin */
387 if (segnum >= nilfs_sufile_get_nsegments(sufile))
389 down_write(&NILFS_MDT(sufile)->mi_sem);
390 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
/*
 * kmap (not kmap_atomic) because the mapping is handed to the caller
 * and stays live until nilfs_sufile_put_segment_usage() releases it.
 */
393 kaddr = kmap(bh->b_page);
394 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
/* NOTE(review): error-flagged entries are rejected here; the
 * cleanup/return path is outside this view -- verify it unmaps bh */
395 if (nilfs_segment_usage_error(su)) {
407 up_write(&NILFS_MDT(sufile)->mi_sem);
412 * nilfs_sufile_put_segment_usage - put a segment usage
413 * @sufile: inode of segment usage file
414 * @segnum: segment number
417 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
418 * specified by @segnum. @bh must be the buffer head which have been returned
419 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
/* Releases the mapping/buffer obtained by nilfs_sufile_get_segment_usage(). */
421 void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
422 struct buffer_head *bh)
429 * nilfs_sufile_get_stat - get segment usage statistics
430 * @sufile: inode of segment usage file
431 * @sustat: pointer to a structure of segment usage statistics
433 * Description: nilfs_sufile_get_stat() returns information about segment
436 * Return Value: On success, 0 is returned, and segment usage information is
437 * stored in the place pointed by @stat. On error, one of the following
438 * negative error codes is returned.
442 * %-ENOMEM - Insufficient amount of memory available.
444 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
446 struct buffer_head *header_bh;
447 struct nilfs_sufile_header *header;
448 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
/* readers only need shared access to the sufile */
452 down_read(&NILFS_MDT(sufile)->mi_sem);
454 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
/* copy counters out of the on-disk header into the stat structure */
458 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
459 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
460 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
461 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
462 sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
463 sustat->ss_ctime = nilfs->ns_ctime;
464 sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
/* ns_prot_seq is guarded by its own spinlock, not mi_sem */
465 spin_lock(&nilfs->ns_last_segment_lock);
466 sustat->ss_prot_seq = nilfs->ns_prot_seq;
467 spin_unlock(&nilfs->ns_last_segment_lock);
468 kunmap_atomic(kaddr, KM_USER0);
472 up_read(&NILFS_MDT(sufile)->mi_sem);
477 * nilfs_sufile_get_ncleansegs - get the number of clean segments
478 * @sufile: inode of segment usage file
479 * @nsegsp: pointer to the number of clean segments
481 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
484 * Return Value: On success, 0 is returned and the number of clean segments is
485 * stored in the place pointed by @nsegsp. On error, one of the following
486 * negative error codes is returned.
490 * %-ENOMEM - Insufficient amount of memory available.
/* Thin wrapper: extract only the clean-segment count from the full stats. */
492 int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
494 struct nilfs_sustat sustat;
497 ret = nilfs_sufile_get_stat(sufile, &sustat);
499 *nsegsp = sustat.ss_ncleansegs;
504 * nilfs_sufile_set_error - mark a segment as erroneous
505 * @sufile: inode of segment usage file
506 * @segnum: segment number
508 * Description: nilfs_sufile_set_error() marks the segment specified by
509 * @segnum as erroneous. The error segment will never be used again.
511 * Return Value: On success, 0 is returned. On error, one of the following
512 * negative error codes is returned.
516 * %-ENOMEM - Insufficient amount of memory available.
518 * %-EINVAL - Invalid segment usage number.
520 int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
522 struct buffer_head *header_bh, *su_bh;
523 struct nilfs_segment_usage *su;
524 struct nilfs_sufile_header *header;
/* reject out-of-range segment numbers before touching any metadata */
528 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
529 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
530 __func__, (unsigned long long)segnum);
533 down_write(&NILFS_MDT(sufile)->mi_sem);
535 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
538 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
542 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
543 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
/* already marked erroneous: nothing to do */
544 if (nilfs_segment_usage_error(su)) {
545 kunmap_atomic(kaddr, KM_USER0);
550 nilfs_segment_usage_set_error(su);
551 kunmap_atomic(kaddr, KM_USER0);
/* only sh_ndirtysegs is decremented -- presumably the segment was
 * counted dirty before; error segments drop out of both counters */
554 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
555 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
556 le64_add_cpu(&header->sh_ndirtysegs, -1);
557 kunmap_atomic(kaddr, KM_USER0);
558 nilfs_mdt_mark_buffer_dirty(header_bh);
559 nilfs_mdt_mark_buffer_dirty(su_bh);
560 nilfs_mdt_mark_dirty(sufile);
567 up_write(&NILFS_MDT(sufile)->mi_sem);
572 * nilfs_sufile_get_suinfo - get an array of segment usage information
573 * @sufile: inode of segment usage file
574 * @segnum: segment number to start looking
575 * @si: array of suinfo
576 * @nsi: size of suinfo array
580 * Return Value: On success, the number of entries stored in @si is returned. On error, one of the
581 * following negative error codes is returned.
585 * %-ENOMEM - Insufficient amount of memory available.
587 ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
588 struct nilfs_suinfo *si, size_t nsi)
590 struct buffer_head *su_bh;
591 struct nilfs_segment_usage *su;
592 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
593 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
595 unsigned long nsegs, segusages_per_block;
599 down_read(&NILFS_MDT(sufile)->mi_sem);
601 segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
602 nsegs = min_t(unsigned long,
603 nilfs_sufile_get_nsegments(sufile) - segnum,
605 for (i = 0; i < nsegs; i += n, segnum += n) {
606 n = min_t(unsigned long,
607 segusages_per_block -
608 nilfs_sufile_get_offset(sufile, segnum),
610 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
616 memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
620 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
621 su = nilfs_sufile_block_get_segment_usage(
622 sufile, segnum, su_bh, kaddr);
623 for (j = 0; j < n; j++, su = (void *)su + susz) {
624 si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
625 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
626 si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
627 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
628 if (nilfs_segment_is_active(nilfs, segnum + i + j))
629 si[i + j].sui_flags |=
630 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
632 kunmap_atomic(kaddr, KM_USER0);
638 up_read(&NILFS_MDT(sufile)->mi_sem);