/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

/* number of segment usage entries stored in each sufile block */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* block offset within the sufile holding the usage entry of @segnum */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

/* index of the usage entry of @segnum within its block */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
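
/*
 * A worked example of the mapping above (figures are illustrative, not
 * from any particular filesystem): with mi_entries_per_block == 4 and
 * mi_first_entry_offset == 1, segment 6 is found at
 *
 *	blkoff = (6 + 1) / 4 = 1,  offset = (6 + 1) % 4 = 3,
 *
 * i.e. the fourth entry of the second sufile block. do_div() is used
 * because plain 64-bit division is unavailable on some 32-bit targets;
 * it divides its first argument in place and returns the remainder.
 */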

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static inline struct nilfs_sufile_header *
nilfs_sufile_block_get_header(const struct inode *sufile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

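/*
 * Note: when @create is zero, nilfs_mdt_get_block() fails with -ENOENT
 * for blocks that have never been written; readers such as
 * nilfs_sufile_get_suinfo() below treat that case as a hole of zeroed
 * entries.
 */
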
/*
 * Adjust the clean/dirty segment counters in the sufile header.
 * Negative adjustments are passed as u64 two's-complement values,
 * e.g. (u64)-1 decrements a counter by one.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_update - apply a primitive operation to a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @create: create flag passed on to the segment usage block lookup
 * @dofunc: operation applied to the header block and the usage block
 *
 * Description: nilfs_sufile_update() looks up the header block and the
 * segment usage block of @segnum and applies @dofunc to both while
 * holding the sufile semaphore.
 *
 * Return Value: 0 on success, %-EINVAL if @segnum is out of range, or
 * a negative error code returned by the block lookup.
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create,
						   &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

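/*
 * The nilfs_sufile_do_*() primitives further below are designed to be
 * passed to nilfs_sufile_update() as @dofunc. A hypothetical caller
 * sketch (thin public wrappers for these calls are expected to live in
 * sufile.h):
 *
 *	err = nilfs_sufile_update(sufile, segnum, 0,
 *				  nilfs_sufile_do_free);
 *
 * would mark @segnum clean again through nilfs_sufile_do_free().
 */
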
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus;
	int ret, i, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	segnum = last_alloc + 1;
	maxsegnum = nsegments - 1;
	for (i = 0; i < nsegments; i += nsus) {
		if (segnum >= nsegments) {
			/* wrap around */
			segnum = 0;
			maxsegnum = last_alloc;
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr, KM_USER0);

			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = nilfs_sufile_block_get_header(
				sufile, header_bh, kaddr);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

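/*
 * Illustrative caller sketch for nilfs_sufile_alloc() (error handling
 * elided; the segment constructor is the expected real user):
 *
 *	__u64 segnum;
 *	int err = nilfs_sufile_alloc(sufile, &segnum);
 *
 * On success, the usage entry of @segnum has just been flipped from
 * clean to dirty and sh_last_alloc in the header points at it.
 */
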
/*
 * Cancel the freeing of a segment: turn its usage entry from clean
 * back to dirty and adjust the header counters to match.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/*
 * Scrap a segment: reset its usage entry to dirty garbage (no blocks,
 * no timestamp) regardless of its previous state.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/*
 * Free a segment: mark its usage entry clean and update the counters.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_get_segment_usage - get a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @sup: pointer to segment usage
 * @bhp: pointer to buffer head
 *
 * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
 * specified by @segnum.
 *
 * Return Value: On success, 0 is returned, and the segment usage and the
 * buffer head of the buffer on which the segment usage is located are stored
 * in the place pointed by @sup and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number.
 */
int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
				   struct nilfs_segment_usage **sup,
				   struct buffer_head **bhp)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	/* segnum is 0 origin */
	if (segnum >= nilfs_sufile_get_nsegments(sufile))
		return -EINVAL;
	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap(bh->b_page);
		brelse(bh);
		ret = -EINVAL;
		goto out_sem;
	}

	if (sup != NULL)
		*sup = su;
	*bhp = bh;

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_put_segment_usage - put a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @bh: buffer head
 *
 * Description: nilfs_sufile_put_segment_usage() releases the segment usage
 * specified by @segnum. @bh must be the buffer head which has been returned
 * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
 */
void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
				    struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}

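/*
 * Sketch of the get/put pairing (illustrative only; the local variable
 * names are assumptions):
 *
 *	struct nilfs_segment_usage *su;
 *	struct buffer_head *bh;
 *	__u64 lastmod;
 *
 *	err = nilfs_sufile_get_segment_usage(sufile, segnum, &su, &bh);
 *	if (!err) {
 *		lastmod = le64_to_cpu(su->su_lastmod);
 *		nilfs_sufile_put_segment_usage(sufile, segnum, bh);
 *	}
 *
 * The segment usage page stays kmap()ed between the two calls, so a
 * successful get must always be paired with a put.
 */
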
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_ncleansegs - get the number of clean segments
 * @sufile: inode of segment usage file
 * @nsegsp: pointer to the number of clean segments
 *
 * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
 * segments.
 *
 * Return Value: On success, 0 is returned and the number of clean segments is
 * stored in the place pointed by @nsegsp. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
{
	struct nilfs_sustat sustat;
	int ret;

	ret = nilfs_sufile_get_stat(sufile, &sustat);
	if (ret == 0)
		*nsegsp = sustat.ss_ncleansegs;
	return ret;
}

/*
 * Mark a segment's usage entry as erroneous, removing it from the
 * clean count if it was clean before.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean)
		nilfs_sufile_mod_counter(header_bh, -1, 0);
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @si: array of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() reads the usage information of
 * up to @nsi segments, starting from the segment specified by @segnum,
 * and stores it in the array pointed to by @si.
 *
 * Return Value: On success, the number of segment usage entries read is
 * returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
				struct nilfs_suinfo *si, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(&si[i], 0, sizeof(struct nilfs_suinfo) * n);
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			si[i + j].sui_lastmod = le64_to_cpu(su->su_lastmod);
			si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
			si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si[i + j].sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
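
/*
 * Illustrative caller sketch for nilfs_sufile_get_suinfo() (the
 * GETSUINFO ioctl path is the expected real user; the loop body here
 * is an assumption for demonstration):
 *
 *	struct nilfs_suinfo si[16];
 *	ssize_t n;
 *	int i, ndirty = 0;
 *
 *	n = nilfs_sufile_get_suinfo(sufile, segnum, si, ARRAY_SIZE(si));
 *	for (i = 0; i < n; i++)
 *		ndirty += !!(si[i].sui_flags &
 *			     (1UL << NILFS_SEGMENT_USAGE_DIRTY));
 *
 * Note that unwritten sufile blocks are reported as zeroed entries
 * rather than as errors.
 */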