[GFS2] [-mm patch] fs/gfs2/: possible cleanups
linux-2.6: fs/gfs2/eattr.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License v.2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/xattr.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <asm/semaphore.h>
18 #include <asm/uaccess.h>
19
20 #include "gfs2.h"
21 #include "lm_interface.h"
22 #include "incore.h"
23 #include "acl.h"
24 #include "eaops.h"
25 #include "eattr.h"
26 #include "glock.h"
27 #include "inode.h"
28 #include "meta_io.h"
29 #include "quota.h"
30 #include "rgrp.h"
31 #include "trans.h"
32 #include "util.h"
33
34 /**
35  * ea_calc_size - returns the actual number of bytes the request will take up
36  *                (not counting any unstuffed data blocks)
37  * @sdp: the filesystem superblock
38  * @er: the extended attribute request
39  * @size: filled in with the number of bytes the EA record needs in the block
40  *
41  * Returns: 1 if the EA should be stuffed, 0 if it needs unstuffed data blocks
42  */
43
44 static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
45                         unsigned int *size)
46 {
47         *size = GFS2_EAREQ_SIZE_STUFFED(er);
48         if (*size <= sdp->sd_jbsize)
49                 return 1;
50
51         *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
52
53         return 0;
54 }
55
56 static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
57 {
58         unsigned int size;
59
60         if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
61                 return -ERANGE;
62
63         ea_calc_size(sdp, er, &size);
64
65         /* This can only happen with 512 byte blocks */
66         if (size > sdp->sd_jbsize)
67                 return -ERANGE;
68
69         return 0;
70 }
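
ea_calc_size() and ea_check_size() decide whether a request can be "stuffed" (name and value held inline in the EA block) or must be "unstuffed" (value spread over separate data blocks, with only 64-bit block pointers held inline). A rough sketch of that size math is below; it assumes the GFS2_EAREQ_SIZE_* macros in eattr.h align the in-block record to 8 bytes and charge one uint64_t pointer per journaled-block-sized chunk of data (eattr.h is not shown here, so both are assumptions, and the helper names are made up):

/* Illustration only -- mirrors what the GFS2_EAREQ_SIZE_* macros are
 * assumed to compute; not part of the real code. */
static unsigned int example_ea_stuffed_size(struct gfs2_ea_request *er)
{
        return ALIGN(sizeof(struct gfs2_ea_header) +
                     er->er_name_len + er->er_data_len, 8);
}

static unsigned int example_ea_unstuffed_size(struct gfs2_sbd *sdp,
                                              struct gfs2_ea_request *er)
{
        unsigned int ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);

        return ALIGN(sizeof(struct gfs2_ea_header) + er->er_name_len +
                     ptrs * sizeof(uint64_t), 8);
}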
71
72 typedef int (*ea_call_t) (struct gfs2_inode *ip,
73                           struct buffer_head *bh,
74                           struct gfs2_ea_header *ea,
75                           struct gfs2_ea_header *prev,
76                           void *private);
77
78 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
79                         ea_call_t ea_call, void *data)
80 {
81         struct gfs2_ea_header *ea, *prev = NULL;
82         int error = 0;
83
84         if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_EA))
85                 return -EIO;
86
87         for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
88                 if (!GFS2_EA_REC_LEN(ea))
89                         goto fail;
90                 if (!(bh->b_data <= (char *)ea &&
91                       (char *)GFS2_EA2NEXT(ea) <=
92                       bh->b_data + bh->b_size))
93                         goto fail;
94                 if (!GFS2_EATYPE_VALID(ea->ea_type))
95                         goto fail;
96
97                 error = ea_call(ip, bh, ea, prev, data);
98                 if (error)
99                         return error;
100
101                 if (GFS2_EA_IS_LAST(ea)) {
102                         if ((char *)GFS2_EA2NEXT(ea) !=
103                             bh->b_data + bh->b_size)
104                                 goto fail;
105                         break;
106                 }
107         }
108
109         return error;
110
111  fail:
112         gfs2_consist_inode(ip);
113         return -EIO;
114 }
115
116 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
117 {
118         struct buffer_head *bh, *eabh;
119         uint64_t *eablk, *end;
120         int error;
121
122         error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
123                                DIO_START | DIO_WAIT, &bh);
124         if (error)
125                 return error;
126
127         if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
128                 error = ea_foreach_i(ip, bh, ea_call, data);
129                 goto out;
130         }
131
132         if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_IN)) {
133                 error = -EIO;
134                 goto out;
135         }
136
137         eablk = (uint64_t *)(bh->b_data + sizeof(struct gfs2_meta_header));
138         end = eablk + ip->i_sbd->sd_inptrs;
139
140         for (; eablk < end; eablk++) {
141                 uint64_t bn;
142
143                 if (!*eablk)
144                         break;
145                 bn = be64_to_cpu(*eablk);
146
147                 error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
148                                        &eabh);
149                 if (error)
150                         break;
151                 error = ea_foreach_i(ip, eabh, ea_call, data);
152                 brelse(eabh);
153                 if (error)
154                         break;
155         }
156  out:
157         brelse(bh);
158
159         return error;
160 }
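
ea_foreach() hides the in-block walk (and the indirect-block case) behind the ea_call_t callback: the callback runs once per record, a non-zero return stops the walk, and a positive return is how callbacks such as ea_find_i below report "found". A hypothetical callback that just counts in-use records, to show the calling convention (struct ea_count and count_ea are invented for illustration):

struct ea_count {
        unsigned int used;
};

static int count_ea(struct gfs2_inode *ip, struct buffer_head *bh,
                    struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                    void *private)
{
        struct ea_count *ec = private;

        if (ea->ea_type != GFS2_EATYPE_UNUSED)
                ec->used++;

        return 0;       /* 0 = keep walking, >0 = stop and report success */
}

/* Caller side:
 *      struct ea_count ec = { .used = 0 };
 *      int error = ea_foreach(ip, count_ea, &ec);
 */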
161
162 struct ea_find {
163         struct gfs2_ea_request *ef_er;
164         struct gfs2_ea_location *ef_el;
165 };
166
167 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
168                      struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
169                      void *private)
170 {
171         struct ea_find *ef = private;
172         struct gfs2_ea_request *er = ef->ef_er;
173
174         if (ea->ea_type == GFS2_EATYPE_UNUSED)
175                 return 0;
176
177         if (ea->ea_type == er->er_type) {
178                 if (ea->ea_name_len == er->er_name_len &&
179                     !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
180                         struct gfs2_ea_location *el = ef->ef_el;
181                         get_bh(bh);
182                         el->el_bh = bh;
183                         el->el_ea = ea;
184                         el->el_prev = prev;
185                         return 1;
186                 }
187         }
188
189 #if 0
190         else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
191                  er->er_type == GFS2_EATYPE_SYS)
192                 return 1;
193 #endif
194
195         return 0;
196 }
197
198 int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
199                  struct gfs2_ea_location *el)
200 {
201         struct ea_find ef;
202         int error;
203
204         ef.ef_er = er;
205         ef.ef_el = el;
206
207         memset(el, 0, sizeof(struct gfs2_ea_location));
208
209         error = ea_foreach(ip, ea_find_i, &ef);
210         if (error > 0)
211                 return 0;
212
213         return error;
214 }
215
216 /**
217  * ea_dealloc_unstuffed - free the data blocks referenced by an unstuffed EA
218  * @ip: the inode
219  * @bh: the buffer holding the EA block
220  * @ea: the EA record whose data blocks are being freed
221  * @prev: the previous EA record in the block, or NULL
222  * @private: if non-NULL, do not merge the emptied record into @prev
223  *
224  * Take advantage of the fact that all unstuffed blocks are
225  * allocated from the same RG.  But beware: this may not always
226  * be true.
227  *
228  * Returns: errno
229  */
230
231 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
232                                 struct gfs2_ea_header *ea,
233                                 struct gfs2_ea_header *prev, void *private)
234 {
235         int *leave = private;
236         struct gfs2_sbd *sdp = ip->i_sbd;
237         struct gfs2_rgrpd *rgd;
238         struct gfs2_holder rg_gh;
239         struct buffer_head *dibh;
240         uint64_t *dataptrs, bn = 0;
241         uint64_t bstart = 0;
242         unsigned int blen = 0;
243         unsigned int blks = 0;
244         unsigned int x;
245         int error;
246
247         if (GFS2_EA_IS_STUFFED(ea))
248                 return 0;
249
250         dataptrs = GFS2_EA2DATAPTRS(ea);
251         for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
252                 if (*dataptrs) {
253                         blks++;
254                         bn = be64_to_cpu(*dataptrs);
255                 }
256         if (!blks)
257                 return 0;
258
259         rgd = gfs2_blk2rgrpd(sdp, bn);
260         if (!rgd) {
261                 gfs2_consist_inode(ip);
262                 return -EIO;
263         }
264
265         error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
266         if (error)
267                 return error;
268
269         error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length +
270                                  RES_DINODE + RES_EATTR + RES_STATFS +
271                                  RES_QUOTA, blks);
272         if (error)
273                 goto out_gunlock;
274
275         gfs2_trans_add_bh(ip->i_gl, bh, 1);
276
277         dataptrs = GFS2_EA2DATAPTRS(ea);
278         for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
279                 if (!*dataptrs)
280                         break;
281                 bn = be64_to_cpu(*dataptrs);
282
283                 if (bstart + blen == bn)
284                         blen++;
285                 else {
286                         if (bstart)
287                                 gfs2_free_meta(ip, bstart, blen);
288                         bstart = bn;
289                         blen = 1;
290                 }
291
292                 *dataptrs = 0;
293                 if (!ip->i_di.di_blocks)
294                         gfs2_consist_inode(ip);
295                 ip->i_di.di_blocks--;
296         }
297         if (bstart)
298                 gfs2_free_meta(ip, bstart, blen);
299
300         if (prev && !leave) {
301                 uint32_t len;
302
303                 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
304                 prev->ea_rec_len = cpu_to_be32(len);
305
306                 if (GFS2_EA_IS_LAST(ea))
307                         prev->ea_flags |= GFS2_EAFLAG_LAST;
308         } else {
309                 ea->ea_type = GFS2_EATYPE_UNUSED;
310                 ea->ea_num_ptrs = 0;
311         }
312
313         error = gfs2_meta_inode_buffer(ip, &dibh);
314         if (!error) {
315                 ip->i_di.di_ctime = get_seconds();
316                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
317                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
318                 brelse(dibh);
319         }
320
321         gfs2_trans_end(sdp);
322
323  out_gunlock:
324         gfs2_glock_dq_uninit(&rg_gh);
325
326         return error;
327 }
328
329 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
330                                struct gfs2_ea_header *ea,
331                                struct gfs2_ea_header *prev, int leave)
332 {
333         struct gfs2_alloc *al;
334         int error;
335
336         al = gfs2_alloc_get(ip);
337
338         error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
339         if (error)
340                 goto out_alloc;
341
342         error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
343         if (error)
344                 goto out_quota;
345
346         error = ea_dealloc_unstuffed(ip,
347                                      bh, ea, prev,
348                                      (leave) ? &error : NULL);
349
350         gfs2_glock_dq_uninit(&al->al_ri_gh);
351
352  out_quota:
353         gfs2_quota_unhold(ip);
354
355  out_alloc:
356         gfs2_alloc_put(ip);
357
358         return error;
359 }
360
361 #if 0
362
363 static int gfs2_ea_repack_i(struct gfs2_inode *ip)
364 {
365         return -EOPNOTSUPP;
366 }
367
368 int gfs2_ea_repack(struct gfs2_inode *ip)
369 {
370         struct gfs2_holder gh;
371         int error;
372
373         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
374         if (error)
375                 return error;
376
377         /* Some sort of permissions checking would be nice */
378
379         error = gfs2_ea_repack_i(ip);
380
381         gfs2_glock_dq_uninit(&gh);
382
383         return error;
384 }
385
386 #endif  /*  0  */
387
388 struct ea_list {
389         struct gfs2_ea_request *ei_er;
390         unsigned int ei_size;
391 };
392
393 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
394                      struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
395                      void *private)
396 {
397         struct ea_list *ei = private;
398         struct gfs2_ea_request *er = ei->ei_er;
399         unsigned int ea_size = GFS2_EA_STRLEN(ea);
400
401         if (ea->ea_type == GFS2_EATYPE_UNUSED)
402                 return 0;
403
404         if (er->er_data_len) {
405                 char *prefix;
406                 unsigned int l;
407                 char c = 0;
408
409                 if (ei->ei_size + ea_size > er->er_data_len)
410                         return -ERANGE;
411
412                 if (ea->ea_type == GFS2_EATYPE_USR) {
413                         prefix = "user.";
414                         l = 5;
415                 } else {
416                         prefix = "system.";
417                         l = 7;
418                 }
419
420                 memcpy(er->er_data + ei->ei_size,
421                        prefix, l);
422                 memcpy(er->er_data + ei->ei_size + l,
423                        GFS2_EA2NAME(ea),
424                        ea->ea_name_len);
425                 memcpy(er->er_data + ei->ei_size +
426                        ea_size - 1,
427                        &c, 1);
428         }
429
430         ei->ei_size += ea_size;
431
432         return 0;
433 }
434
435 /**
436  * gfs2_ea_list - list the names of the extended attributes on an inode
437  * @ip: the inode
438  * @er: the request; er_data/er_data_len describe the caller's buffer (may be empty)
439  *
440  * Returns: actual size of data on success, -errno on error
441  */
442
443 int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
444 {
445         struct gfs2_holder i_gh;
446         int error;
447
448         if (!er->er_data || !er->er_data_len) {
449                 er->er_data = NULL;
450                 er->er_data_len = 0;
451         }
452
453         error = gfs2_glock_nq_init(ip->i_gl,
454                                   LM_ST_SHARED, LM_FLAG_ANY,
455                                   &i_gh);
456         if (error)
457                 return error;
458
459         if (ip->i_di.di_eattr) {
460                 struct ea_list ei = { .ei_er = er, .ei_size = 0 };
461
462                 error = ea_foreach(ip, ea_list_i, &ei);
463                 if (!error)
464                         error = ei.ei_size;
465         }
466
467         gfs2_glock_dq_uninit(&i_gh);
468
469         return error;
470 }
471
472 /**
473  * ea_get_unstuffed - actually copies the unstuffed data into the
474  *                    request buffer
475  * @ip: the inode
476  * @ea: the EA record whose data blocks are read
477  * @data: the buffer the attribute value is copied into
478  *
479  * Returns: errno
480  */
481
482 static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
483                             char *data)
484 {
485         struct gfs2_sbd *sdp = ip->i_sbd;
486         struct buffer_head **bh;
487         unsigned int amount = GFS2_EA_DATA_LEN(ea);
488         unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
489         uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
490         unsigned int x;
491         int error = 0;
492
493         bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
494         if (!bh)
495                 return -ENOMEM;
496
497         for (x = 0; x < nptrs; x++) {
498                 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
499                                        DIO_START, bh + x);
500                 if (error) {
501                         while (x--)
502                                 brelse(bh[x]);
503                         goto out;
504                 }
505                 dataptrs++;
506         }
507
508         for (x = 0; x < nptrs; x++) {
509                 error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
510                 if (error) {
511                         for (; x < nptrs; x++)
512                                 brelse(bh[x]);
513                         goto out;
514                 }
515                 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
516                         for (; x < nptrs; x++)
517                                 brelse(bh[x]);
518                         error = -EIO;
519                         goto out;
520                 }
521
522                 memcpy(data,
523                        bh[x]->b_data + sizeof(struct gfs2_meta_header),
524                        (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
525
526                 amount -= sdp->sd_jbsize;
527                 data += sdp->sd_jbsize;
528
529                 brelse(bh[x]);
530         }
531
532  out:
533         kfree(bh);
534
535         return error;
536 }
537
538 int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
539                      char *data)
540 {
541         if (GFS2_EA_IS_STUFFED(el->el_ea)) {
542                 memcpy(data,
543                        GFS2_EA2DATA(el->el_ea),
544                        GFS2_EA_DATA_LEN(el->el_ea));
545                 return 0;
546         } else
547                 return ea_get_unstuffed(ip, el->el_ea, data);
548 }
549
550 /**
551  * gfs2_ea_get_i - find an extended attribute and copy out its value
552  * @ip: the inode
553  * @er: the request; if er_data_len is 0, only the attribute's size is returned
554  *
555  * Returns: actual size of data on success, -errno on error
556  */
557
558 int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
559 {
560         struct gfs2_ea_location el;
561         int error;
562
563         if (!ip->i_di.di_eattr)
564                 return -ENODATA;
565
566         error = gfs2_ea_find(ip, er, &el);
567         if (error)
568                 return error;
569         if (!el.el_ea)
570                 return -ENODATA;
571
572         if (er->er_data_len) {
573                 if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
574                         error =  -ERANGE;
575                 else
576                         error = gfs2_ea_get_copy(ip, &el, er->er_data);
577         }
578         if (!error)
579                 error = GFS2_EA_DATA_LEN(el.el_ea);
580
581         brelse(el.el_bh);
582
583         return error;
584 }
585
586 /**
587  * gfs2_ea_get - lock the inode and get an extended attribute
588  * @ip: the inode
589  * @er: the request
590  *
591  * Returns: actual size of data on success, -errno on error
592  */
593
594 int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
595 {
596         struct gfs2_holder i_gh;
597         int error;
598
599         if (!er->er_name_len ||
600             er->er_name_len > GFS2_EA_MAX_NAME_LEN)
601                 return -EINVAL;
602         if (!er->er_data || !er->er_data_len) {
603                 er->er_data = NULL;
604                 er->er_data_len = 0;
605         }
606
607         error = gfs2_glock_nq_init(ip->i_gl,
608                                   LM_ST_SHARED, LM_FLAG_ANY,
609                                   &i_gh);
610         if (error)
611                 return error;
612
613         error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);
614
615         gfs2_glock_dq_uninit(&i_gh);
616
617         return error;
618 }
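
Like the VFS getxattr() convention, gfs2_ea_get() doubles as a size query: with er_data_len == 0 nothing is copied and the positive return value is the attribute's length, so a caller can size a buffer and then call again. A hedged caller-side sketch follows (the attribute name, helper name, and buffer handling are illustrative, not taken from a real caller):

static int example_get_user_ea(struct gfs2_inode *ip, char **value)
{
        static char name[] = "example";         /* placeholder attribute name */
        struct gfs2_ea_request er = {
                .er_type     = GFS2_EATYPE_USR,
                .er_name     = name,
                .er_name_len = sizeof(name) - 1,
        };
        int size;

        size = gfs2_ea_get(ip, &er);            /* er_data_len == 0: size query */
        if (size <= 0)
                return size;

        er.er_data = kmalloc(size, GFP_KERNEL);
        if (!er.er_data)
                return -ENOMEM;
        er.er_data_len = size;

        size = gfs2_ea_get(ip, &er);            /* second call copies the value */
        if (size < 0) {
                kfree(er.er_data);
                return size;
        }

        *value = er.er_data;
        return size;
}

Note that each gfs2_ea_get() call takes and drops the inode glock, so the size can change between the two calls; real callers handle the resulting -ERANGE, which the sketch ignores.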
619
620 /**
621  * ea_alloc_blk - allocates a new block for extended attributes.
622  * @ip: A pointer to the inode that's getting extended attributes
623  * @bhp: filled in with the buffer head of the new block
624  *
625  * Returns: errno
626  */
627
628 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
629 {
630         struct gfs2_sbd *sdp = ip->i_sbd;
631         struct gfs2_ea_header *ea;
632         uint64_t block;
633
634         block = gfs2_alloc_meta(ip);
635
636         *bhp = gfs2_meta_new(ip->i_gl, block);
637         gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
638         gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
639         gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
640
641         ea = GFS2_EA_BH2FIRST(*bhp);
642         ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
643         ea->ea_type = GFS2_EATYPE_UNUSED;
644         ea->ea_flags = GFS2_EAFLAG_LAST;
645         ea->ea_num_ptrs = 0;
646
647         ip->i_di.di_blocks++;
648
649         return 0;
650 }
651
652 /**
653  * ea_write - writes the request info to an ea, creating new blocks if
654  *            necessary
655  * @ip:  inode that is being modified
656  * @ea:  the location of the new ea in a block
657  * @er: the write request
658  *
659  * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
660  *
661  * Returns: errno
662  */
663
664 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
665                     struct gfs2_ea_request *er)
666 {
667         struct gfs2_sbd *sdp = ip->i_sbd;
668
669         ea->ea_data_len = cpu_to_be32(er->er_data_len);
670         ea->ea_name_len = er->er_name_len;
671         ea->ea_type = er->er_type;
672         ea->__pad = 0;
673
674         memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
675
676         if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
677                 ea->ea_num_ptrs = 0;
678                 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
679         } else {
680                 uint64_t *dataptr = GFS2_EA2DATAPTRS(ea);
681                 const char *data = er->er_data;
682                 unsigned int data_len = er->er_data_len;
683                 unsigned int copy;
684                 unsigned int x;
685
686                 ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
687                 for (x = 0; x < ea->ea_num_ptrs; x++) {
688                         struct buffer_head *bh;
689                         uint64_t block;
690                         int mh_size = sizeof(struct gfs2_meta_header);
691
692                         block = gfs2_alloc_meta(ip);
693
694                         bh = gfs2_meta_new(ip->i_gl, block);
695                         gfs2_trans_add_bh(ip->i_gl, bh, 1);
696                         gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
697
698                         ip->i_di.di_blocks++;
699
700                         copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
701                                                              data_len;
702                         memcpy(bh->b_data + mh_size, data, copy);
703                         if (copy < sdp->sd_jbsize)
704                                 memset(bh->b_data + mh_size + copy, 0,
705                                        sdp->sd_jbsize - copy);
706
707                         *dataptr++ = cpu_to_be64((uint64_t)bh->b_blocknr);
708                         data += copy;
709                         data_len -= copy;
710
711                         brelse(bh);
712                 }
713
714                 gfs2_assert_withdraw(sdp, !data_len);
715         }
716
717         return 0;
718 }
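
ea_write() produces one of two record shapes. A stuffed record keeps the value bytes inline right after the name (ea_num_ptrs == 0); an unstuffed record keeps an array of big-endian 64-bit block numbers there instead, each naming a GFS2_METATYPE_ED block whose payload (after the gfs2_meta_header) holds up to sd_jbsize bytes of the value. Sketched below as a comment; exact padding and alignment come from the GFS2_EA2* macros in eattr.h and gfs2_ondisk.h, which are not shown here:

/*
 * Stuffed (ea_num_ptrs == 0):
 *     [ gfs2_ea_header | name (ea_name_len) | value (ea_data_len) ]
 *
 * Unstuffed (ea_num_ptrs > 0):
 *     [ gfs2_ea_header | name | be64 block numbers x ea_num_ptrs ]
 *                                        |
 *                                        v
 *     GFS2_METATYPE_ED blocks: [ gfs2_meta_header | up to sd_jbsize bytes
 *     of the value ], the last chunk zero-padded out to sd_jbsize
 */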
719
720 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
721                                    struct gfs2_ea_request *er,
722                                    void *private);
723
724 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
725                              unsigned int blks,
726                              ea_skeleton_call_t skeleton_call,
727                              void *private)
728 {
729         struct gfs2_alloc *al;
730         struct buffer_head *dibh;
731         int error;
732
733         al = gfs2_alloc_get(ip);
734
735         error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
736         if (error)
737                 goto out;
738
739         error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
740         if (error)
741                 goto out_gunlock_q;
742
743         al->al_requested = blks;
744
745         error = gfs2_inplace_reserve(ip);
746         if (error)
747                 goto out_gunlock_q;
748
749         error = gfs2_trans_begin(ip->i_sbd,
750                                  blks + al->al_rgd->rd_ri.ri_length +
751                                  RES_DINODE + RES_STATFS + RES_QUOTA, 0);
752         if (error)
753                 goto out_ipres;
754
755         error = skeleton_call(ip, er, private);
756         if (error)
757                 goto out_end_trans;
758
759         error = gfs2_meta_inode_buffer(ip, &dibh);
760         if (!error) {
761                 if (er->er_flags & GFS2_ERF_MODE) {
762                         gfs2_assert_withdraw(ip->i_sbd,
763                                             (ip->i_di.di_mode & S_IFMT) ==
764                                             (er->er_mode & S_IFMT));
765                         ip->i_di.di_mode = er->er_mode;
766                 }
767                 ip->i_di.di_ctime = get_seconds();
768                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
769                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
770                 brelse(dibh);
771         }
772
773  out_end_trans:
774         gfs2_trans_end(ip->i_sbd);
775
776  out_ipres:
777         gfs2_inplace_release(ip);
778
779  out_gunlock_q:
780         gfs2_quota_unlock(ip);
781
782  out:
783         gfs2_alloc_put(ip);
784
785         return error;
786 }
787
788 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
789                      void *private)
790 {
791         struct buffer_head *bh;
792         int error;
793
794         error = ea_alloc_blk(ip, &bh);
795         if (error)
796                 return error;
797
798         ip->i_di.di_eattr = bh->b_blocknr;
799         error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
800
801         brelse(bh);
802
803         return error;
804 }
805
806 /**
807  * ea_init - initializes a new eattr block
808  * @ip: the inode
809  * @er: the request to write into the new block
810  *
811  * Returns: errno
812  */
813
814 static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
815 {
816         unsigned int jbsize = ip->i_sbd->sd_jbsize;
817         unsigned int blks = 1;
818
819         if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
820                 blks += DIV_ROUND_UP(er->er_data_len, jbsize);
821
822         return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
823 }
824
825 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
826 {
827         uint32_t ea_size = GFS2_EA_SIZE(ea);
828         struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
829                                      ea_size);
830         uint32_t new_size = GFS2_EA_REC_LEN(ea) - ea_size;
831         int last = ea->ea_flags & GFS2_EAFLAG_LAST;
832
833         ea->ea_rec_len = cpu_to_be32(ea_size);
834         ea->ea_flags ^= last;
835
836         new->ea_rec_len = cpu_to_be32(new_size);
837         new->ea_flags = last;
838
839         return new;
840 }
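
ea_split_ea() carves the slack out of an over-large record: the original keeps exactly GFS2_EA_SIZE(ea) bytes as its new ea_rec_len, the leftover bytes become a fresh header immediately after it, and the GFS2_EAFLAG_LAST bit moves to the new record if the original carried it. A small worked example (the numbers are made up):

/*
 * Before: one record with ea_rec_len == 96 whose live contents only
 * need GFS2_EA_SIZE(ea) == 40 bytes.
 *
 * After ea_split_ea():
 *     original: ea_rec_len = 40, GFS2_EAFLAG_LAST cleared if it was set
 *     new:      starts 40 bytes in, ea_rec_len = 96 - 40 = 56,
 *               inherits GFS2_EAFLAG_LAST; the caller then ea_write()s
 *               the incoming request into it
 *               (ea_set_simple_noalloc / ea_set_simple_alloc)
 */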
841
842 static void ea_set_remove_stuffed(struct gfs2_inode *ip,
843                                   struct gfs2_ea_location *el)
844 {
845         struct gfs2_ea_header *ea = el->el_ea;
846         struct gfs2_ea_header *prev = el->el_prev;
847         uint32_t len;
848
849         gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
850
851         if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
852                 ea->ea_type = GFS2_EATYPE_UNUSED;
853                 return;
854         } else if (GFS2_EA2NEXT(prev) != ea) {
855                 prev = GFS2_EA2NEXT(prev);
856                 gfs2_assert_withdraw(ip->i_sbd, GFS2_EA2NEXT(prev) == ea);
857         }
858
859         len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
860         prev->ea_rec_len = cpu_to_be32(len);
861
862         if (GFS2_EA_IS_LAST(ea))
863                 prev->ea_flags |= GFS2_EAFLAG_LAST;
864 }
865
866 struct ea_set {
867         int ea_split;
868
869         struct gfs2_ea_request *es_er;
870         struct gfs2_ea_location *es_el;
871
872         struct buffer_head *es_bh;
873         struct gfs2_ea_header *es_ea;
874 };
875
876 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
877                                  struct gfs2_ea_header *ea, struct ea_set *es)
878 {
879         struct gfs2_ea_request *er = es->es_er;
880         struct buffer_head *dibh;
881         int error;
882
883         error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + 2 * RES_EATTR, 0);
884         if (error)
885                 return error;
886
887         gfs2_trans_add_bh(ip->i_gl, bh, 1);
888
889         if (es->ea_split)
890                 ea = ea_split_ea(ea);
891
892         ea_write(ip, ea, er);
893
894         if (es->es_el)
895                 ea_set_remove_stuffed(ip, es->es_el);
896
897         error = gfs2_meta_inode_buffer(ip, &dibh);
898         if (error)
899                 goto out;
900
901         if (er->er_flags & GFS2_ERF_MODE) {
902                 gfs2_assert_withdraw(ip->i_sbd,
903                         (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
904                 ip->i_di.di_mode = er->er_mode;
905         }
906         ip->i_di.di_ctime = get_seconds();
907         gfs2_trans_add_bh(ip->i_gl, dibh, 1);
908         gfs2_dinode_out(&ip->i_di, dibh->b_data);
909         brelse(dibh);
910  out:
911         gfs2_trans_end(ip->i_sbd);
912
913         return error;
914 }
915
916 static int ea_set_simple_alloc(struct gfs2_inode *ip,
917                                struct gfs2_ea_request *er, void *private)
918 {
919         struct ea_set *es = private;
920         struct gfs2_ea_header *ea = es->es_ea;
921         int error;
922
923         gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
924
925         if (es->ea_split)
926                 ea = ea_split_ea(ea);
927
928         error = ea_write(ip, ea, er);
929         if (error)
930                 return error;
931
932         if (es->es_el)
933                 ea_set_remove_stuffed(ip, es->es_el);
934
935         return 0;
936 }
937
938 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
939                          struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
940                          void *private)
941 {
942         struct ea_set *es = private;
943         unsigned int size;
944         int stuffed;
945         int error;
946
947         stuffed = ea_calc_size(ip->i_sbd, es->es_er, &size);
948
949         if (ea->ea_type == GFS2_EATYPE_UNUSED) {
950                 if (GFS2_EA_REC_LEN(ea) < size)
951                         return 0;
952                 if (!GFS2_EA_IS_STUFFED(ea)) {
953                         error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
954                         if (error)
955                                 return error;
956                 }
957                 es->ea_split = 0;
958         } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
959                 es->ea_split = 1;
960         else
961                 return 0;
962
963         if (stuffed) {
964                 error = ea_set_simple_noalloc(ip, bh, ea, es);
965                 if (error)
966                         return error;
967         } else {
968                 unsigned int blks;
969
970                 es->es_bh = bh;
971                 es->es_ea = ea;
972                 blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
973                                         ip->i_sbd->sd_jbsize);
974
975                 error = ea_alloc_skeleton(ip, es->es_er, blks,
976                                           ea_set_simple_alloc, es);
977                 if (error)
978                         return error;
979         }
980
981         return 1;
982 }
983
984 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
985                         void *private)
986 {
987         struct gfs2_sbd *sdp = ip->i_sbd;
988         struct buffer_head *indbh, *newbh;
989         uint64_t *eablk;
990         int error;
991         int mh_size = sizeof(struct gfs2_meta_header);
992
993         if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
994                 uint64_t *end;
995
996                 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
997                                        DIO_START | DIO_WAIT, &indbh);
998                 if (error)
999                         return error;
1000
1001                 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1002                         error = -EIO;
1003                         goto out;
1004                 }
1005
1006                 eablk = (uint64_t *)(indbh->b_data + mh_size);
1007                 end = eablk + sdp->sd_inptrs;
1008
1009                 for (; eablk < end; eablk++)
1010                         if (!*eablk)
1011                                 break;
1012
1013                 if (eablk == end) {
1014                         error = -ENOSPC;
1015                         goto out;
1016                 }
1017
1018                 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1019         } else {
1020                 uint64_t blk;
1021
1022                 blk = gfs2_alloc_meta(ip);
1023
1024                 indbh = gfs2_meta_new(ip->i_gl, blk);
1025                 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1026                 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
1027                 gfs2_buffer_clear_tail(indbh, mh_size);
1028
1029                 eablk = (uint64_t *)(indbh->b_data + mh_size);
1030                 *eablk = cpu_to_be64(ip->i_di.di_eattr);
1031                 ip->i_di.di_eattr = blk;
1032                 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
1033                 ip->i_di.di_blocks++;
1034
1035                 eablk++;
1036         }
1037
1038         error = ea_alloc_blk(ip, &newbh);
1039         if (error)
1040                 goto out;
1041
1042         *eablk = cpu_to_be64((uint64_t)newbh->b_blocknr);
1043         error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
1044         brelse(newbh);
1045         if (error)
1046                 goto out;
1047
1048         if (private)
1049                 ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);
1050
1051  out:
1052         brelse(indbh);
1053
1054         return error;
1055 }
1056
1057 static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
1058                     struct gfs2_ea_location *el)
1059 {
1060         struct ea_set es;
1061         unsigned int blks = 2;
1062         int error;
1063
1064         memset(&es, 0, sizeof(struct ea_set));
1065         es.es_er = er;
1066         es.es_el = el;
1067
1068         error = ea_foreach(ip, ea_set_simple, &es);
1069         if (error > 0)
1070                 return 0;
1071         if (error)
1072                 return error;
1073
1074         if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
1075                 blks++;
1076         if (GFS2_EAREQ_SIZE_STUFFED(er) > ip->i_sbd->sd_jbsize)
1077                 blks += DIV_ROUND_UP(er->er_data_len, ip->i_sbd->sd_jbsize);
1078
1079         return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
1080 }
1081
1082 static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1083                                    struct gfs2_ea_location *el)
1084 {
1085         if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1086                 el->el_prev = GFS2_EA2NEXT(el->el_prev);
1087                 gfs2_assert_withdraw(ip->i_sbd,
1088                                      GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1089         }
1090
1091         return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
1092 }
1093
1094 int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1095 {
1096         struct gfs2_ea_location el;
1097         int error;
1098
1099         if (!ip->i_di.di_eattr) {
1100                 if (er->er_flags & XATTR_REPLACE)
1101                         return -ENODATA;
1102                 return ea_init(ip, er);
1103         }
1104
1105         error = gfs2_ea_find(ip, er, &el);
1106         if (error)
1107                 return error;
1108
1109         if (el.el_ea) {
1110                 if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
1111                         brelse(el.el_bh);
1112                         return -EPERM;
1113                 }
1114
1115                 error = -EEXIST;
1116                 if (!(er->er_flags & XATTR_CREATE)) {
1117                         int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
1118                         error = ea_set_i(ip, er, &el);
1119                         if (!error && unstuffed)
1120                                 ea_set_remove_unstuffed(ip, &el);
1121                 }
1122
1123                 brelse(el.el_bh);
1124         } else {
1125                 error = -ENODATA;
1126                 if (!(er->er_flags & XATTR_REPLACE))
1127                         error = ea_set_i(ip, er, NULL);
1128         }
1129
1130         return error;
1131 }
1132
1133 int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1134 {
1135         struct gfs2_holder i_gh;
1136         int error;
1137
1138         if (!er->er_name_len ||
1139             er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1140                 return -EINVAL;
1141         if (!er->er_data || !er->er_data_len) {
1142                 er->er_data = NULL;
1143                 er->er_data_len = 0;
1144         }
1145         error = ea_check_size(ip->i_sbd, er);
1146         if (error)
1147                 return error;
1148
1149         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1150         if (error)
1151                 return error;
1152
1153         if (IS_IMMUTABLE(ip->i_vnode))
1154                 error = -EPERM;
1155         else
1156                 error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);
1157
1158         gfs2_glock_dq_uninit(&i_gh);
1159
1160         return error;
1161 }
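
gfs2_ea_set_i() follows the usual setxattr() flag semantics: XATTR_CREATE fails with -EEXIST if the attribute is already present, XATTR_REPLACE fails with -ENODATA if it is not, and with neither flag the attribute is created or overwritten as needed. A hypothetical caller (the function, name, and value are placeholders):

static int example_set_user_ea(struct gfs2_inode *ip)
{
        static char value[] = "on";             /* placeholder value */
        struct gfs2_ea_request er = {
                .er_type     = GFS2_EATYPE_USR,
                .er_name     = "example_flag",  /* placeholder name */
                .er_name_len = 12,
                .er_data     = value,
                .er_data_len = sizeof(value) - 1,
                .er_flags    = XATTR_REPLACE,   /* overwrite only, never create */
        };

        return gfs2_ea_set(ip, &er);            /* -ENODATA if it did not exist */
}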
1162
1163 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1164 {
1165         struct gfs2_ea_header *ea = el->el_ea;
1166         struct gfs2_ea_header *prev = el->el_prev;
1167         struct buffer_head *dibh;
1168         int error;
1169
1170         error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
1171         if (error)
1172                 return error;
1173
1174         gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1175
1176         if (prev) {
1177                 uint32_t len;
1178
1179                 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1180                 prev->ea_rec_len = cpu_to_be32(len);
1181
1182                 if (GFS2_EA_IS_LAST(ea))
1183                         prev->ea_flags |= GFS2_EAFLAG_LAST;
1184         } else
1185                 ea->ea_type = GFS2_EATYPE_UNUSED;
1186
1187         error = gfs2_meta_inode_buffer(ip, &dibh);
1188         if (!error) {
1189                 ip->i_di.di_ctime = get_seconds();
1190                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1191                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1192                 brelse(dibh);
1193         }
1194
1195         gfs2_trans_end(ip->i_sbd);
1196
1197         return error;
1198 }
1199
1200 int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1201 {
1202         struct gfs2_ea_location el;
1203         int error;
1204
1205         if (!ip->i_di.di_eattr)
1206                 return -ENODATA;
1207
1208         error = gfs2_ea_find(ip, er, &el);
1209         if (error)
1210                 return error;
1211         if (!el.el_ea)
1212                 return -ENODATA;
1213
1214         if (GFS2_EA_IS_STUFFED(el.el_ea))
1215                 error = ea_remove_stuffed(ip, &el);
1216         else
1217                 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
1218                                             0);
1219
1220         brelse(el.el_bh);
1221
1222         return error;
1223 }
1224
1225 /**
1226  * gfs2_ea_remove - removes an extended attribute
1227  * @ip: pointer to the inode of the target file
1228  * @er: request information
1229  *
1230  * Returns: errno
1231  */
1232
1233 int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1234 {
1235         struct gfs2_holder i_gh;
1236         int error;
1237
1238         if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1239                 return -EINVAL;
1240
1241         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1242         if (error)
1243                 return error;
1244
1245         if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
1246                 error = -EPERM;
1247         else
1248                 error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);
1249
1250         gfs2_glock_dq_uninit(&i_gh);
1251
1252         return error;
1253 }
1254
1255 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1256                                   struct gfs2_ea_header *ea, char *data)
1257 {
1258         struct gfs2_sbd *sdp = ip->i_sbd;
1259         struct buffer_head **bh;
1260         unsigned int amount = GFS2_EA_DATA_LEN(ea);
1261         unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1262         uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
1263         unsigned int x;
1264         int error;
1265
1266         bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
1267         if (!bh)
1268                 return -ENOMEM;
1269
1270         error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1271         if (error)
1272                 goto out;
1273
1274         for (x = 0; x < nptrs; x++) {
1275                 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
1276                                        DIO_START, bh + x);
1277                 if (error) {
1278                         while (x--)
1279                                 brelse(bh[x]);
1280                         goto fail;
1281                 }
1282                 dataptrs++;
1283         }
1284
1285         for (x = 0; x < nptrs; x++) {
1286                 error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
1287                 if (error) {
1288                         for (; x < nptrs; x++)
1289                                 brelse(bh[x]);
1290                         goto fail;
1291                 }
1292                 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
1293                         for (; x < nptrs; x++)
1294                                 brelse(bh[x]);
1295                         error = -EIO;
1296                         goto fail;
1297                 }
1298
1299                 gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
1300
1301                 memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header),
1302                        data,
1303                        (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
1304
1305                 amount -= sdp->sd_jbsize;
1306                 data += sdp->sd_jbsize;
1307
1308                 brelse(bh[x]);
1309         }
1310
1311  out:
1312         kfree(bh);
1313
1314         return error;
1315
1316  fail:
1317         gfs2_trans_end(sdp);
1318         kfree(bh);
1319
1320         return error;
1321 }
1322
1323 int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1324                       struct iattr *attr, char *data)
1325 {
1326         struct buffer_head *dibh;
1327         int error;
1328
1329         if (GFS2_EA_IS_STUFFED(el->el_ea)) {
1330                 error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
1331                 if (error)
1332                         return error;
1333
1334                 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1335                 memcpy(GFS2_EA2DATA(el->el_ea),
1336                        data,
1337                        GFS2_EA_DATA_LEN(el->el_ea));
1338         } else
1339                 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
1340
1341         if (error)
1342                 return error;
1343
1344         error = gfs2_meta_inode_buffer(ip, &dibh);
1345         if (!error) {
1346                 error = inode_setattr(ip->i_vnode, attr);
1347                 gfs2_assert_warn(ip->i_sbd, !error);
1348                 gfs2_inode_attr_out(ip);
1349                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1350                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1351                 brelse(dibh);
1352         }
1353
1354         gfs2_trans_end(ip->i_sbd);
1355
1356         return error;
1357 }
1358
1359 static int ea_dealloc_indirect(struct gfs2_inode *ip)
1360 {
1361         struct gfs2_sbd *sdp = ip->i_sbd;
1362         struct gfs2_rgrp_list rlist;
1363         struct buffer_head *indbh, *dibh;
1364         uint64_t *eablk, *end;
1365         unsigned int rg_blocks = 0;
1366         uint64_t bstart = 0;
1367         unsigned int blen = 0;
1368         unsigned int blks = 0;
1369         unsigned int x;
1370         int error;
1371
1372         memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1373
1374         error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
1375                                DIO_START | DIO_WAIT, &indbh);
1376         if (error)
1377                 return error;
1378
1379         if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1380                 error = -EIO;
1381                 goto out;
1382         }
1383
1384         eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1385         end = eablk + sdp->sd_inptrs;
1386
1387         for (; eablk < end; eablk++) {
1388                 uint64_t bn;
1389
1390                 if (!*eablk)
1391                         break;
1392                 bn = be64_to_cpu(*eablk);
1393
1394                 if (bstart + blen == bn)
1395                         blen++;
1396                 else {
1397                         if (bstart)
1398                                 gfs2_rlist_add(sdp, &rlist, bstart);
1399                         bstart = bn;
1400                         blen = 1;
1401                 }
1402                 blks++;
1403         }
1404         if (bstart)
1405                 gfs2_rlist_add(sdp, &rlist, bstart);
1406         else
1407                 goto out;
1408
1409         gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
1410
1411         for (x = 0; x < rlist.rl_rgrps; x++) {
1412                 struct gfs2_rgrpd *rgd;
1413                 rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1414                 rg_blocks += rgd->rd_ri.ri_length;
1415         }
1416
1417         error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1418         if (error)
1419                 goto out_rlist_free;
1420
1421         error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
1422                                  RES_INDIRECT + RES_STATFS +
1423                                  RES_QUOTA, blks);
1424         if (error)
1425                 goto out_gunlock;
1426
1427         gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1428
1429         eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1430         bstart = 0;
1431         blen = 0;
1432
1433         for (; eablk < end; eablk++) {
1434                 uint64_t bn;
1435
1436                 if (!*eablk)
1437                         break;
1438                 bn = be64_to_cpu(*eablk);
1439
1440                 if (bstart + blen == bn)
1441                         blen++;
1442                 else {
1443                         if (bstart)
1444                                 gfs2_free_meta(ip, bstart, blen);
1445                         bstart = bn;
1446                         blen = 1;
1447                 }
1448
1449                 *eablk = 0;
1450                 if (!ip->i_di.di_blocks)
1451                         gfs2_consist_inode(ip);
1452                 ip->i_di.di_blocks--;
1453         }
1454         if (bstart)
1455                 gfs2_free_meta(ip, bstart, blen);
1456
1457         ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
1458
1459         error = gfs2_meta_inode_buffer(ip, &dibh);
1460         if (!error) {
1461                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1462                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1463                 brelse(dibh);
1464         }
1465
1466         gfs2_trans_end(sdp);
1467
1468  out_gunlock:
1469         gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1470
1471  out_rlist_free:
1472         gfs2_rlist_free(&rlist);
1473
1474  out:
1475         brelse(indbh);
1476
1477         return error;
1478 }
1479
1480 static int ea_dealloc_block(struct gfs2_inode *ip)
1481 {
1482         struct gfs2_sbd *sdp = ip->i_sbd;
1483         struct gfs2_alloc *al = &ip->i_alloc;
1484         struct gfs2_rgrpd *rgd;
1485         struct buffer_head *dibh;
1486         int error;
1487
1488         rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
1489         if (!rgd) {
1490                 gfs2_consist_inode(ip);
1491                 return -EIO;
1492         }
1493
1494         error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1495                                    &al->al_rgd_gh);
1496         if (error)
1497                 return error;
1498
1499         error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE +
1500                                  RES_STATFS + RES_QUOTA, 1);
1501         if (error)
1502                 goto out_gunlock;
1503
1504         gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1505
1506         ip->i_di.di_eattr = 0;
1507         if (!ip->i_di.di_blocks)
1508                 gfs2_consist_inode(ip);
1509         ip->i_di.di_blocks--;
1510
1511         error = gfs2_meta_inode_buffer(ip, &dibh);
1512         if (!error) {
1513                 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1514                 gfs2_dinode_out(&ip->i_di, dibh->b_data);
1515                 brelse(dibh);
1516         }
1517
1518         gfs2_trans_end(sdp);
1519
1520  out_gunlock:
1521         gfs2_glock_dq_uninit(&al->al_rgd_gh);
1522
1523         return error;
1524 }
1525
1526 /**
1527  * gfs2_ea_dealloc - deallocate the extended attribute fork
1528  * @ip: the inode
1529  *
1530  * Returns: errno
1531  */
1532
1533 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1534 {
1535         struct gfs2_alloc *al;
1536         int error;
1537
1538         al = gfs2_alloc_get(ip);
1539
1540         error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1541         if (error)
1542                 goto out_alloc;
1543
1544         error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
1545         if (error)
1546                 goto out_quota;
1547
1548         error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1549         if (error)
1550                 goto out_rindex;
1551
1552         if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
1553                 error = ea_dealloc_indirect(ip);
1554                 if (error)
1555                         goto out_rindex;
1556         }
1557
1558         error = ea_dealloc_block(ip);
1559
1560  out_rindex:
1561         gfs2_glock_dq_uninit(&al->al_ri_gh);
1562
1563  out_quota:
1564         gfs2_quota_unhold(ip);
1565
1566  out_alloc:
1567         gfs2_alloc_put(ip);
1568
1569         return error;
1570 }
1571
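
gfs2_ea_dealloc() tears the EA fork down in a deliberate order, each phase in its own transaction: first the unstuffed data blocks (ea_dealloc_unstuffed via ea_foreach), then the indirect pointer block when GFS2_DIF_EA_INDIRECT is set, and only then the block di_eattr points at, which is also when di_eattr is cleared. A hedged sketch of the caller side, as it might look in the inode deallocation path (the surrounding function is invented for illustration):

static int example_dealloc_ea_fork(struct gfs2_inode *ip)
{
        int error = 0;

        if (ip->i_di.di_eattr)                  /* inode has an EA fork */
                error = gfs2_ea_dealloc(ip);

        return error;
}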