/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
33 static inline unsigned long
34 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
36 return NILFS_MDT(sufile)->mi_entries_per_block;
40 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
42 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
43 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
44 return (unsigned long)t;
48 nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
50 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
51 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
55 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
58 return min_t(unsigned long,
59 nilfs_sufile_segment_usages_per_block(sufile) -
60 nilfs_sufile_get_offset(sufile, curr),
/* Header lives at the start of block 0; map it from the kmapped page. */
static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
72 static struct nilfs_segment_usage *
73 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
74 struct buffer_head *bh, void *kaddr)
76 return kaddr + bh_offset(bh) +
77 nilfs_sufile_get_offset(sufile, segnum) *
78 NILFS_MDT(sufile)->mi_entry_size;
81 static inline int nilfs_sufile_get_header_block(struct inode *sufile,
82 struct buffer_head **bhp)
84 return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
88 nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
89 int create, struct buffer_head **bhp)
91 return nilfs_mdt_get_block(sufile,
92 nilfs_sufile_get_blkoff(sufile, segnum),
96 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
97 u64 ncleanadd, u64 ndirtyadd)
99 struct nilfs_sufile_header *header;
102 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
103 header = kaddr + bh_offset(header_bh);
104 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
105 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
106 kunmap_atomic(kaddr, KM_USER0);
108 nilfs_mdt_mark_buffer_dirty(header_bh);
111 int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
112 void (*dofunc)(struct inode *, __u64,
113 struct buffer_head *,
114 struct buffer_head *))
116 struct buffer_head *header_bh, *bh;
119 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
120 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
121 __func__, (unsigned long long)segnum);
124 down_write(&NILFS_MDT(sufile)->mi_sem);
126 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
130 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
132 dofunc(sufile, segnum, header_bh, bh);
138 up_write(&NILFS_MDT(sufile)->mi_sem);
143 * nilfs_sufile_alloc - allocate a segment
144 * @sufile: inode of segment usage file
145 * @segnump: pointer to segment number
147 * Description: nilfs_sufile_alloc() allocates a clean segment.
149 * Return Value: On success, 0 is returned and the segment number of the
150 * allocated segment is stored in the place pointed by @segnump. On error, one
151 * of the following negative error codes is returned.
155 * %-ENOMEM - Insufficient amount of memory available.
157 * %-ENOSPC - No clean segment left.
159 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
161 struct buffer_head *header_bh, *su_bh;
162 struct nilfs_sufile_header *header;
163 struct nilfs_segment_usage *su;
164 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
165 __u64 segnum, maxsegnum, last_alloc;
167 unsigned long nsegments, ncleansegs, nsus;
170 down_write(&NILFS_MDT(sufile)->mi_sem);
172 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
175 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
176 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
177 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
178 last_alloc = le64_to_cpu(header->sh_last_alloc);
179 kunmap_atomic(kaddr, KM_USER0);
181 nsegments = nilfs_sufile_get_nsegments(sufile);
182 segnum = last_alloc + 1;
183 maxsegnum = nsegments - 1;
184 for (i = 0; i < nsegments; i += nsus) {
185 if (segnum >= nsegments) {
188 maxsegnum = last_alloc;
190 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
194 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
195 su = nilfs_sufile_block_get_segment_usage(
196 sufile, segnum, su_bh, kaddr);
198 nsus = nilfs_sufile_segment_usages_in_block(
199 sufile, segnum, maxsegnum);
200 for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
201 if (!nilfs_segment_usage_clean(su))
203 /* found a clean segment */
204 nilfs_segment_usage_set_dirty(su);
205 kunmap_atomic(kaddr, KM_USER0);
207 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
208 header = nilfs_sufile_block_get_header(
209 sufile, header_bh, kaddr);
210 le64_add_cpu(&header->sh_ncleansegs, -1);
211 le64_add_cpu(&header->sh_ndirtysegs, 1);
212 header->sh_last_alloc = cpu_to_le64(segnum);
213 kunmap_atomic(kaddr, KM_USER0);
215 nilfs_mdt_mark_buffer_dirty(header_bh);
216 nilfs_mdt_mark_buffer_dirty(su_bh);
217 nilfs_mdt_mark_dirty(sufile);
223 kunmap_atomic(kaddr, KM_USER0);
227 /* no segments left */
234 up_write(&NILFS_MDT(sufile)->mi_sem);
238 void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
239 struct buffer_head *header_bh,
240 struct buffer_head *su_bh)
242 struct nilfs_segment_usage *su;
245 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
246 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
247 if (unlikely(!nilfs_segment_usage_clean(su))) {
248 printk(KERN_WARNING "%s: segment %llu must be clean\n",
249 __func__, (unsigned long long)segnum);
250 kunmap_atomic(kaddr, KM_USER0);
253 nilfs_segment_usage_set_dirty(su);
254 kunmap_atomic(kaddr, KM_USER0);
256 nilfs_sufile_mod_counter(header_bh, -1, 1);
257 nilfs_mdt_mark_buffer_dirty(su_bh);
258 nilfs_mdt_mark_dirty(sufile);
261 void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
262 struct buffer_head *header_bh,
263 struct buffer_head *su_bh)
265 struct nilfs_segment_usage *su;
269 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
270 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
271 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
272 su->su_nblocks == cpu_to_le32(0)) {
273 kunmap_atomic(kaddr, KM_USER0);
276 clean = nilfs_segment_usage_clean(su);
277 dirty = nilfs_segment_usage_dirty(su);
279 /* make the segment garbage */
280 su->su_lastmod = cpu_to_le64(0);
281 su->su_nblocks = cpu_to_le32(0);
282 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
283 kunmap_atomic(kaddr, KM_USER0);
285 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
286 nilfs_mdt_mark_buffer_dirty(su_bh);
287 nilfs_mdt_mark_dirty(sufile);
290 void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
291 struct buffer_head *header_bh,
292 struct buffer_head *su_bh)
294 struct nilfs_segment_usage *su;
298 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
299 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
300 if (nilfs_segment_usage_clean(su)) {
301 printk(KERN_WARNING "%s: segment %llu is already clean\n",
302 __func__, (unsigned long long)segnum);
303 kunmap_atomic(kaddr, KM_USER0);
306 WARN_ON(nilfs_segment_usage_error(su));
307 WARN_ON(!nilfs_segment_usage_dirty(su));
309 sudirty = nilfs_segment_usage_dirty(su);
310 nilfs_segment_usage_set_clean(su);
311 kunmap_atomic(kaddr, KM_USER0);
312 nilfs_mdt_mark_buffer_dirty(su_bh);
314 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
315 nilfs_mdt_mark_dirty(sufile);
319 * nilfs_sufile_get_segment_usage - get a segment usage
320 * @sufile: inode of segment usage file
321 * @segnum: segment number
322 * @sup: pointer to segment usage
323 * @bhp: pointer to buffer head
325 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
326 * specified by @segnum.
328 * Return Value: On success, 0 is returned, and the segment usage and the
329 * buffer head of the buffer on which the segment usage is located are stored
330 * in the place pointed by @sup and @bhp, respectively. On error, one of the
331 * following negative error codes is returned.
335 * %-ENOMEM - Insufficient amount of memory available.
337 * %-EINVAL - Invalid segment usage number.
339 int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
340 struct nilfs_segment_usage **sup,
341 struct buffer_head **bhp)
343 struct buffer_head *bh;
344 struct nilfs_segment_usage *su;
348 /* segnum is 0 origin */
349 if (segnum >= nilfs_sufile_get_nsegments(sufile))
351 down_write(&NILFS_MDT(sufile)->mi_sem);
352 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
355 kaddr = kmap(bh->b_page);
356 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
357 if (nilfs_segment_usage_error(su)) {
369 up_write(&NILFS_MDT(sufile)->mi_sem);
374 * nilfs_sufile_put_segment_usage - put a segment usage
375 * @sufile: inode of segment usage file
376 * @segnum: segment number
379 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
380 * specified by @segnum. @bh must be the buffer head which have been returned
381 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
383 void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
384 struct buffer_head *bh)
391 * nilfs_sufile_get_stat - get segment usage statistics
392 * @sufile: inode of segment usage file
393 * @stat: pointer to a structure of segment usage statistics
395 * Description: nilfs_sufile_get_stat() returns information about segment
398 * Return Value: On success, 0 is returned, and segment usage information is
399 * stored in the place pointed by @stat. On error, one of the following
400 * negative error codes is returned.
404 * %-ENOMEM - Insufficient amount of memory available.
406 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
408 struct buffer_head *header_bh;
409 struct nilfs_sufile_header *header;
410 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
414 down_read(&NILFS_MDT(sufile)->mi_sem);
416 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
420 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
421 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
422 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
423 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
424 sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
425 sustat->ss_ctime = nilfs->ns_ctime;
426 sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
427 spin_lock(&nilfs->ns_last_segment_lock);
428 sustat->ss_prot_seq = nilfs->ns_prot_seq;
429 spin_unlock(&nilfs->ns_last_segment_lock);
430 kunmap_atomic(kaddr, KM_USER0);
434 up_read(&NILFS_MDT(sufile)->mi_sem);
439 * nilfs_sufile_get_ncleansegs - get the number of clean segments
440 * @sufile: inode of segment usage file
441 * @nsegsp: pointer to the number of clean segments
443 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
446 * Return Value: On success, 0 is returned and the number of clean segments is
447 * stored in the place pointed by @nsegsp. On error, one of the following
448 * negative error codes is returned.
452 * %-ENOMEM - Insufficient amount of memory available.
454 int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
456 struct nilfs_sustat sustat;
459 ret = nilfs_sufile_get_stat(sufile, &sustat);
461 *nsegsp = sustat.ss_ncleansegs;
465 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
466 struct buffer_head *header_bh,
467 struct buffer_head *su_bh)
469 struct nilfs_segment_usage *su;
473 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
474 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
475 if (nilfs_segment_usage_error(su)) {
476 kunmap_atomic(kaddr, KM_USER0);
479 suclean = nilfs_segment_usage_clean(su);
480 nilfs_segment_usage_set_error(su);
481 kunmap_atomic(kaddr, KM_USER0);
484 nilfs_sufile_mod_counter(header_bh, -1, 0);
485 nilfs_mdt_mark_buffer_dirty(su_bh);
486 nilfs_mdt_mark_dirty(sufile);
490 * nilfs_sufile_get_suinfo -
491 * @sufile: inode of segment usage file
492 * @segnum: segment number to start looking
493 * @si: array of suinfo
494 * @nsi: size of suinfo array
498 * Return Value: On success, 0 is returned and .... On error, one of the
499 * following negative error codes is returned.
503 * %-ENOMEM - Insufficient amount of memory available.
505 ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
506 struct nilfs_suinfo *si, size_t nsi)
508 struct buffer_head *su_bh;
509 struct nilfs_segment_usage *su;
510 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
511 struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
513 unsigned long nsegs, segusages_per_block;
517 down_read(&NILFS_MDT(sufile)->mi_sem);
519 segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
520 nsegs = min_t(unsigned long,
521 nilfs_sufile_get_nsegments(sufile) - segnum,
523 for (i = 0; i < nsegs; i += n, segnum += n) {
524 n = min_t(unsigned long,
525 segusages_per_block -
526 nilfs_sufile_get_offset(sufile, segnum),
528 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
534 memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
538 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
539 su = nilfs_sufile_block_get_segment_usage(
540 sufile, segnum, su_bh, kaddr);
541 for (j = 0; j < n; j++, su = (void *)su + susz) {
542 si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
543 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
544 si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
545 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
546 if (nilfs_segment_is_active(nilfs, segnum + j))
547 si[i + j].sui_flags |=
548 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
550 kunmap_atomic(kaddr, KM_USER0);
556 up_read(&NILFS_MDT(sufile)->mi_sem);