2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/xattr.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/lm_interface.h>
18 #include <asm/uaccess.h>
* ea_calc_size - returns the actual number of bytes the request will take up
*                (not counting any unstuffed data blocks)
40 * Returns: 1 if the EA should be stuffed
43 static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
46 *size = GFS2_EAREQ_SIZE_STUFFED(er);
47 if (*size <= sdp->sd_jbsize)
50 *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
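/*
 * Sanity-check a request before any blocks are touched: the value may not
 * exceed the on-disk maximum, and even the unstuffed form of the header
 * (name plus data block pointers) has to fit inside one journaled block.
 */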
55 static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
59 if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
62 ea_calc_size(sdp, er, &size);
64 /* This can only happen with 512 byte blocks */
65 if (size > sdp->sd_jbsize)
71 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
72 struct gfs2_ea_header *ea,
73 struct gfs2_ea_header *prev, void *private);
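/*
 * Walk every EA header in one EA block, validating each record as we go
 * and calling ea_call on it; prev tracks the previously visited header.
 */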
75 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
76 ea_call_t ea_call, void *data)
78 struct gfs2_ea_header *ea, *prev = NULL;
81 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
84 for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
85 if (!GFS2_EA_REC_LEN(ea))
87 if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
88 bh->b_data + bh->b_size))
90 if (!GFS2_EATYPE_VALID(ea->ea_type))
93 error = ea_call(ip, bh, ea, prev, data);
97 if (GFS2_EA_IS_LAST(ea)) {
98 if ((char *)GFS2_EA2NEXT(ea) !=
99 bh->b_data + bh->b_size)
108 gfs2_consist_inode(ip);
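/*
 * Apply ea_call to the headers in every EA block the inode owns, following
 * the indirect EA block when GFS2_DIF_EA_INDIRECT is set.
 */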
112 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
114 struct buffer_head *bh, *eabh;
118 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
122 if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
123 error = ea_foreach_i(ip, bh, ea_call, data);
127 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
132 eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
133 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
135 for (; eablk < end; eablk++) {
140 bn = be64_to_cpu(*eablk);
142 error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
145 error = ea_foreach_i(ip, eabh, ea_call, data);
156 struct gfs2_ea_request *ef_er;
157 struct gfs2_ea_location *ef_el;
160 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
161 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
164 struct ea_find *ef = private;
165 struct gfs2_ea_request *er = ef->ef_er;
167 if (ea->ea_type == GFS2_EATYPE_UNUSED)
170 if (ea->ea_type == er->er_type) {
171 if (ea->ea_name_len == er->er_name_len &&
172 !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
173 struct gfs2_ea_location *el = ef->ef_el;
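/**
 * gfs2_ea_find - find an extended attribute matching a request
 * @ip: the inode to search
 * @er: the request, giving the type and name to look for
 * @el: filled in with the buffer, header and previous header of any match
 *
 * Returns: errno
 */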
185 int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
186 struct gfs2_ea_location *el)
194 memset(el, 0, sizeof(struct gfs2_ea_location));
196 error = ea_foreach(ip, ea_find_i, &ef);
* ea_dealloc_unstuffed - free the data blocks pointed to by an unstuffed ea
*
* Take advantage of the fact that all unstuffed blocks are
* allocated from the same RG.  But watch, this may not always
* be true.
218 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
219 struct gfs2_ea_header *ea,
220 struct gfs2_ea_header *prev, void *private)
222 int *leave = private;
223 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
224 struct gfs2_rgrpd *rgd;
225 struct gfs2_holder rg_gh;
226 struct buffer_head *dibh;
230 unsigned int blen = 0;
231 unsigned int blks = 0;
235 if (GFS2_EA_IS_STUFFED(ea))
238 dataptrs = GFS2_EA2DATAPTRS(ea);
239 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
242 bn = be64_to_cpu(*dataptrs);
248 rgd = gfs2_blk2rgrpd(sdp, bn);
250 gfs2_consist_inode(ip);
254 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
258 error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length + RES_DINODE +
259 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
263 gfs2_trans_add_bh(ip->i_gl, bh, 1);
265 dataptrs = GFS2_EA2DATAPTRS(ea);
266 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
269 bn = be64_to_cpu(*dataptrs);
271 if (bstart + blen == bn)
275 gfs2_free_meta(ip, bstart, blen);
281 if (!ip->i_di.di_blocks)
282 gfs2_consist_inode(ip);
283 ip->i_di.di_blocks--;
286 gfs2_free_meta(ip, bstart, blen);
288 if (prev && !leave) {
291 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
292 prev->ea_rec_len = cpu_to_be32(len);
294 if (GFS2_EA_IS_LAST(ea))
295 prev->ea_flags |= GFS2_EAFLAG_LAST;
297 ea->ea_type = GFS2_EATYPE_UNUSED;
301 error = gfs2_meta_inode_buffer(ip, &dibh);
303 ip->i_di.di_ctime = get_seconds();
304 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
305 gfs2_dinode_out(ip, dibh->b_data);
312 gfs2_glock_dq_uninit(&rg_gh);
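/*
 * Acquire the quota and rindex holds needed by ea_dealloc_unstuffed and
 * then free the data blocks of a single unstuffed attribute.
 */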
316 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
317 struct gfs2_ea_header *ea,
318 struct gfs2_ea_header *prev, int leave)
320 struct gfs2_alloc *al;
323 al = gfs2_alloc_get(ip);
325 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
329 error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
333 error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
335 gfs2_glock_dq_uninit(&al->al_ri_gh);
338 gfs2_quota_unhold(ip);
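/*
 * Context for gfs2_ea_list: the listing request plus the number of bytes of
 * prefixed names already copied into the caller's buffer.
 */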
345 struct gfs2_ea_request *ei_er;
346 unsigned int ei_size;
349 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
350 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
353 struct ea_list *ei = private;
354 struct gfs2_ea_request *er = ei->ei_er;
355 unsigned int ea_size = gfs2_ea_strlen(ea);
357 if (ea->ea_type == GFS2_EATYPE_UNUSED)
360 if (er->er_data_len) {
365 if (ei->ei_size + ea_size > er->er_data_len)
368 switch (ea->ea_type) {
369 case GFS2_EATYPE_USR:
373 case GFS2_EATYPE_SYS:
377 case GFS2_EATYPE_SECURITY:
378 prefix = "security.";
385 memcpy(er->er_data + ei->ei_size, prefix, l);
386 memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
388 memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
391 ei->ei_size += ea_size;
401 * Returns: actual size of data on success, -errno on error
404 int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
406 struct gfs2_holder i_gh;
409 if (!er->er_data || !er->er_data_len) {
414 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
418 if (ip->i_di.di_eattr) {
419 struct ea_list ei = { .ei_er = er, .ei_size = 0 };
421 error = ea_foreach(ip, ea_list_i, &ei);
426 gfs2_glock_dq_uninit(&i_gh);
* ea_get_unstuffed - actually copies the unstuffed data into the request buffer
* @ip: The GFS2 inode
* @ea: The extended attribute header structure
* @data: The buffer that the unstuffed data is copied into
441 static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
444 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
445 struct buffer_head **bh;
446 unsigned int amount = GFS2_EA_DATA_LEN(ea);
447 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
448 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
452 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
456 for (x = 0; x < nptrs; x++) {
457 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
467 for (x = 0; x < nptrs; x++) {
468 error = gfs2_meta_wait(sdp, bh[x]);
470 for (; x < nptrs; x++)
474 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
475 for (; x < nptrs; x++)
481 memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
482 (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
484 amount -= sdp->sd_jbsize;
485 data += sdp->sd_jbsize;
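/*
 * Copy an attribute's value out to the supplied buffer, either directly
 * from the EA header (stuffed) or from the data blocks it points to
 * (unstuffed).
 */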
495 int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
498 if (GFS2_EA_IS_STUFFED(el->el_ea)) {
499 memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
502 return ea_get_unstuffed(ip, el->el_ea, data);
507 * @ip: The GFS2 inode
508 * @er: The request structure
510 * Returns: actual size of data on success, -errno on error
513 int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
515 struct gfs2_ea_location el;
518 if (!ip->i_di.di_eattr)
521 error = gfs2_ea_find(ip, er, &el);
527 if (er->er_data_len) {
528 if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
531 error = gfs2_ea_get_copy(ip, &el, er->er_data);
534 error = GFS2_EA_DATA_LEN(el.el_ea);
543 * @ip: The GFS2 inode
544 * @er: The request structure
546 * Returns: actual size of data on success, -errno on error
549 int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
551 struct gfs2_holder i_gh;
554 if (!er->er_name_len ||
555 er->er_name_len > GFS2_EA_MAX_NAME_LEN)
557 if (!er->er_data || !er->er_data_len) {
562 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
566 error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);
568 gfs2_glock_dq_uninit(&i_gh);
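/*
 * Illustrative sketch only, not code from this file: a caller drives a
 * lookup by filling in a struct gfs2_ea_request and handing it to
 * gfs2_ea_get(), roughly (name, buf, size and ret being the caller's own
 * variables)
 *
 *	struct gfs2_ea_request er = {
 *		.er_type     = GFS2_EATYPE_USR,
 *		.er_name     = name,
 *		.er_name_len = strlen(name),
 *		.er_data     = buf,
 *		.er_data_len = size,
 *	};
 *	ret = gfs2_ea_get(ip, &er);
 *
 * A non-negative return is the attribute's data length; a NULL buffer with
 * a zero length queries that length without copying anything.
 */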
574 * ea_alloc_blk - allocates a new block for extended attributes.
575 * @ip: A pointer to the inode that's getting extended attributes
576 * @bhp: Pointer to pointer to a struct buffer_head
581 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
583 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
584 struct gfs2_ea_header *ea;
587 block = gfs2_alloc_meta(ip);
589 *bhp = gfs2_meta_new(ip->i_gl, block);
590 gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
591 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
592 gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
594 ea = GFS2_EA_BH2FIRST(*bhp);
595 ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
596 ea->ea_type = GFS2_EATYPE_UNUSED;
597 ea->ea_flags = GFS2_EAFLAG_LAST;
600 ip->i_di.di_blocks++;
* ea_write - writes the request info to an ea, creating new blocks if necessary
608 * @ip: inode that is being modified
609 * @ea: the location of the new ea in a block
610 * @er: the write request
* Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
617 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
618 struct gfs2_ea_request *er)
620 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
622 ea->ea_data_len = cpu_to_be32(er->er_data_len);
623 ea->ea_name_len = er->er_name_len;
624 ea->ea_type = er->er_type;
627 memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
629 if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
631 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
633 __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
634 const char *data = er->er_data;
635 unsigned int data_len = er->er_data_len;
639 ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
640 for (x = 0; x < ea->ea_num_ptrs; x++) {
641 struct buffer_head *bh;
643 int mh_size = sizeof(struct gfs2_meta_header);
645 block = gfs2_alloc_meta(ip);
647 bh = gfs2_meta_new(ip->i_gl, block);
648 gfs2_trans_add_bh(ip->i_gl, bh, 1);
649 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
651 ip->i_di.di_blocks++;
653 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
655 memcpy(bh->b_data + mh_size, data, copy);
656 if (copy < sdp->sd_jbsize)
657 memset(bh->b_data + mh_size + copy, 0,
658 sdp->sd_jbsize - copy);
660 *dataptr++ = cpu_to_be64(bh->b_blocknr);
667 gfs2_assert_withdraw(sdp, !data_len);
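/*
 * An ea_skeleton_call_t routine does the attribute-specific work;
 * ea_alloc_skeleton wraps it with the common quota checks, block
 * reservation, transaction and dinode update that every allocating
 * operation needs.
 */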
673 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
674 struct gfs2_ea_request *er, void *private);
676 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
678 ea_skeleton_call_t skeleton_call, void *private)
680 struct gfs2_alloc *al;
681 struct buffer_head *dibh;
684 al = gfs2_alloc_get(ip);
686 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
690 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
694 al->al_requested = blks;
696 error = gfs2_inplace_reserve(ip);
700 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
701 blks + al->al_rgd->rd_ri.ri_length +
702 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
706 error = skeleton_call(ip, er, private);
710 error = gfs2_meta_inode_buffer(ip, &dibh);
712 if (er->er_flags & GFS2_ERF_MODE) {
713 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
714 (ip->i_di.di_mode & S_IFMT) ==
715 (er->er_mode & S_IFMT));
716 ip->i_di.di_mode = er->er_mode;
718 ip->i_di.di_ctime = get_seconds();
719 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
720 gfs2_dinode_out(ip, dibh->b_data);
725 gfs2_trans_end(GFS2_SB(&ip->i_inode));
727 gfs2_inplace_release(ip);
729 gfs2_quota_unlock(ip);
735 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
738 struct buffer_head *bh;
741 error = ea_alloc_blk(ip, &bh);
745 ip->i_di.di_eattr = bh->b_blocknr;
746 error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
754 * ea_init - initializes a new eattr block
761 static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
763 unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
764 unsigned int blks = 1;
766 if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
767 blks += DIV_ROUND_UP(er->er_data_len, jbsize);
769 return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
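/*
 * Split an EA record in two: the existing header is trimmed to its used
 * size and a new unused header, inheriting the LAST flag, takes over the
 * remaining space.
 */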
772 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
774 u32 ea_size = GFS2_EA_SIZE(ea);
775 struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
777 u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
778 int last = ea->ea_flags & GFS2_EAFLAG_LAST;
780 ea->ea_rec_len = cpu_to_be32(ea_size);
781 ea->ea_flags ^= last;
783 new->ea_rec_len = cpu_to_be32(new_size);
784 new->ea_flags = last;
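/*
 * Remove the old stuffed copy of an attribute that has just been rewritten
 * elsewhere, either by marking its header unused or by folding its record
 * length into the previous header.
 */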
789 static void ea_set_remove_stuffed(struct gfs2_inode *ip,
790 struct gfs2_ea_location *el)
792 struct gfs2_ea_header *ea = el->el_ea;
793 struct gfs2_ea_header *prev = el->el_prev;
796 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
798 if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
799 ea->ea_type = GFS2_EATYPE_UNUSED;
801 } else if (GFS2_EA2NEXT(prev) != ea) {
802 prev = GFS2_EA2NEXT(prev);
803 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
806 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
807 prev->ea_rec_len = cpu_to_be32(len);
809 if (GFS2_EA_IS_LAST(ea))
810 prev->ea_flags |= GFS2_EAFLAG_LAST;
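/*
 * State shared by the ea_set_simple machinery: the request being written,
 * the location of any existing copy, and the buffer and header of the slot
 * chosen to receive the new attribute.
 */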
816 struct gfs2_ea_request *es_er;
817 struct gfs2_ea_location *es_el;
819 struct buffer_head *es_bh;
820 struct gfs2_ea_header *es_ea;
823 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
824 struct gfs2_ea_header *ea, struct ea_set *es)
826 struct gfs2_ea_request *er = es->es_er;
827 struct buffer_head *dibh;
830 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
834 gfs2_trans_add_bh(ip->i_gl, bh, 1);
837 ea = ea_split_ea(ea);
839 ea_write(ip, ea, er);
842 ea_set_remove_stuffed(ip, es->es_el);
844 error = gfs2_meta_inode_buffer(ip, &dibh);
848 if (er->er_flags & GFS2_ERF_MODE) {
849 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
850 (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
851 ip->i_di.di_mode = er->er_mode;
853 ip->i_di.di_ctime = get_seconds();
854 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
855 gfs2_dinode_out(ip, dibh->b_data);
858 gfs2_trans_end(GFS2_SB(&ip->i_inode));
862 static int ea_set_simple_alloc(struct gfs2_inode *ip,
863 struct gfs2_ea_request *er, void *private)
865 struct ea_set *es = private;
866 struct gfs2_ea_header *ea = es->es_ea;
869 gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
872 ea = ea_split_ea(ea);
874 error = ea_write(ip, ea, er);
879 ea_set_remove_stuffed(ip, es->es_el);
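/*
 * ea_foreach callback for the set path: look for a slot (an unused record,
 * or slack space at the end of a live one) big enough for the request and
 * write the attribute there, allocating data blocks only if it cannot be
 * stuffed.
 */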
884 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
885 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
888 struct ea_set *es = private;
893 stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);
895 if (ea->ea_type == GFS2_EATYPE_UNUSED) {
896 if (GFS2_EA_REC_LEN(ea) < size)
898 if (!GFS2_EA_IS_STUFFED(ea)) {
899 error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
904 } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
910 error = ea_set_simple_noalloc(ip, bh, ea, es);
918 blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
919 GFS2_SB(&ip->i_inode)->sd_jbsize);
921 error = ea_alloc_skeleton(ip, es->es_er, blks,
922 ea_set_simple_alloc, es);
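/*
 * No usable slot exists in the current EA blocks: allocate a fresh EA block
 * for the attribute, creating the indirect EA block (or reusing a free slot
 * in the existing one) to point at it.
 */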
930 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
933 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
934 struct buffer_head *indbh, *newbh;
937 int mh_size = sizeof(struct gfs2_meta_header);
939 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
942 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
947 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
952 eablk = (__be64 *)(indbh->b_data + mh_size);
953 end = eablk + sdp->sd_inptrs;
955 for (; eablk < end; eablk++)
964 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
968 blk = gfs2_alloc_meta(ip);
970 indbh = gfs2_meta_new(ip->i_gl, blk);
971 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
972 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
973 gfs2_buffer_clear_tail(indbh, mh_size);
975 eablk = (__be64 *)(indbh->b_data + mh_size);
976 *eablk = cpu_to_be64(ip->i_di.di_eattr);
977 ip->i_di.di_eattr = blk;
978 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
979 ip->i_di.di_blocks++;
984 error = ea_alloc_blk(ip, &newbh);
988 *eablk = cpu_to_be64((u64)newbh->b_blocknr);
989 error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
995 ea_set_remove_stuffed(ip, private);
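/*
 * Top level of the set path once locks are held: try the in-place
 * ea_set_simple strategies first and fall back to ea_set_block, which
 * allocates a whole new EA block.
 */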
1002 static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
1003 struct gfs2_ea_location *el)
1006 unsigned int blks = 2;
1009 memset(&es, 0, sizeof(struct ea_set));
1013 error = ea_foreach(ip, ea_set_simple, &es);
1019 if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
1021 if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1022 blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1024 return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
1027 static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1028 struct gfs2_ea_location *el)
1030 if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1031 el->el_prev = GFS2_EA2NEXT(el->el_prev);
1032 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
1033 GFS2_EA2NEXT(el->el_prev) == el->el_ea);
return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
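/*
 * Set an attribute with the inode glock already held: create the first EA
 * block if the inode has none, otherwise add or replace the attribute
 * while respecting XATTR_CREATE and XATTR_REPLACE.
 */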
1039 int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1041 struct gfs2_ea_location el;
1044 if (!ip->i_di.di_eattr) {
1045 if (er->er_flags & XATTR_REPLACE)
1047 return ea_init(ip, er);
1050 error = gfs2_ea_find(ip, er, &el);
1055 if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
1061 if (!(er->er_flags & XATTR_CREATE)) {
1062 int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
1063 error = ea_set_i(ip, er, &el);
1064 if (!error && unstuffed)
1065 ea_set_remove_unstuffed(ip, &el);
1071 if (!(er->er_flags & XATTR_REPLACE))
1072 error = ea_set_i(ip, er, NULL);
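/**
 * gfs2_ea_set - sets (or creates or replaces) an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */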
1078 int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1080 struct gfs2_holder i_gh;
1083 if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1085 if (!er->er_data || !er->er_data_len) {
1087 er->er_data_len = 0;
1089 error = ea_check_size(GFS2_SB(&ip->i_inode), er);
1093 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1097 if (IS_IMMUTABLE(&ip->i_inode))
1100 error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);
1102 gfs2_glock_dq_uninit(&i_gh);
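/*
 * Remove a stuffed attribute: inside a small transaction, merge its record
 * into the previous header (or mark it unused) and update the dinode.
 */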
1107 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1109 struct gfs2_ea_header *ea = el->el_ea;
1110 struct gfs2_ea_header *prev = el->el_prev;
1111 struct buffer_head *dibh;
1114 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1118 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1123 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1124 prev->ea_rec_len = cpu_to_be32(len);
1126 if (GFS2_EA_IS_LAST(ea))
1127 prev->ea_flags |= GFS2_EAFLAG_LAST;
1129 ea->ea_type = GFS2_EATYPE_UNUSED;
1131 error = gfs2_meta_inode_buffer(ip, &dibh);
1133 ip->i_di.di_ctime = get_seconds();
1134 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1135 gfs2_dinode_out(ip, dibh->b_data);
1139 gfs2_trans_end(GFS2_SB(&ip->i_inode));
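/*
 * Remove an attribute with the inode glock already held, taking the
 * stuffed or unstuffed removal path as appropriate.
 */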
1144 int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1146 struct gfs2_ea_location el;
1149 if (!ip->i_di.di_eattr)
1152 error = gfs2_ea_find(ip, er, &el);
1158 if (GFS2_EA_IS_STUFFED(el.el_ea))
1159 error = ea_remove_stuffed(ip, &el);
1161 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
* gfs2_ea_remove - removes an extended attribute
1171 * @ip: pointer to the inode of the target file
1172 * @er: request information
1177 int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1179 struct gfs2_holder i_gh;
1182 if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1185 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1189 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1192 error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);
1194 gfs2_glock_dq_uninit(&i_gh);
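/*
 * Overwrite the data blocks of an unstuffed attribute with new contents:
 * read every block, add it to the transaction and copy the new data in.
 */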
1199 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1200 struct gfs2_ea_header *ea, char *data)
1202 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1203 struct buffer_head **bh;
1204 unsigned int amount = GFS2_EA_DATA_LEN(ea);
1205 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1206 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
1210 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
1214 error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1218 for (x = 0; x < nptrs; x++) {
1219 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
1229 for (x = 0; x < nptrs; x++) {
1230 error = gfs2_meta_wait(sdp, bh[x]);
1232 for (; x < nptrs; x++)
1236 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
1237 for (; x < nptrs; x++)
1243 gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
1245 memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
1246 (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
1248 amount -= sdp->sd_jbsize;
1249 data += sdp->sd_jbsize;
1259 gfs2_trans_end(sdp);
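/*
 * Write back a modified ACL value (stuffed or unstuffed) and apply the
 * accompanying inode attribute change within the same transaction.
 */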
1264 int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1265 struct iattr *attr, char *data)
1267 struct buffer_head *dibh;
1270 if (GFS2_EA_IS_STUFFED(el->el_ea)) {
1271 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1275 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1276 memcpy(GFS2_EA2DATA(el->el_ea), data,
1277 GFS2_EA_DATA_LEN(el->el_ea));
1279 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
1284 error = gfs2_meta_inode_buffer(ip, &dibh);
1286 error = inode_setattr(&ip->i_inode, attr);
1287 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1288 gfs2_inode_attr_out(ip);
1289 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1290 gfs2_dinode_out(ip, dibh->b_data);
1294 gfs2_trans_end(GFS2_SB(&ip->i_inode));
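/*
 * Free every EA block listed in the inode's indirect EA block: build a list
 * of the resource groups involved, lock them all, then release the blocks
 * in contiguous runs.
 */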
1299 static int ea_dealloc_indirect(struct gfs2_inode *ip)
1301 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1302 struct gfs2_rgrp_list rlist;
1303 struct buffer_head *indbh, *dibh;
1304 __be64 *eablk, *end;
1305 unsigned int rg_blocks = 0;
1307 unsigned int blen = 0;
1308 unsigned int blks = 0;
1312 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1314 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
1318 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1323 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1324 end = eablk + sdp->sd_inptrs;
1326 for (; eablk < end; eablk++) {
1331 bn = be64_to_cpu(*eablk);
1333 if (bstart + blen == bn)
1337 gfs2_rlist_add(sdp, &rlist, bstart);
1344 gfs2_rlist_add(sdp, &rlist, bstart);
1348 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
1350 for (x = 0; x < rlist.rl_rgrps; x++) {
1351 struct gfs2_rgrpd *rgd;
1352 rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1353 rg_blocks += rgd->rd_ri.ri_length;
1356 error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1358 goto out_rlist_free;
1360 error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
1361 RES_STATFS + RES_QUOTA, blks);
1365 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1367 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1371 for (; eablk < end; eablk++) {
1376 bn = be64_to_cpu(*eablk);
1378 if (bstart + blen == bn)
1382 gfs2_free_meta(ip, bstart, blen);
1388 if (!ip->i_di.di_blocks)
1389 gfs2_consist_inode(ip);
1390 ip->i_di.di_blocks--;
1393 gfs2_free_meta(ip, bstart, blen);
1395 ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
1397 error = gfs2_meta_inode_buffer(ip, &dibh);
1399 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1400 gfs2_dinode_out(ip, dibh->b_data);
1404 gfs2_trans_end(sdp);
1407 gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1409 gfs2_rlist_free(&rlist);
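/*
 * Free the single block still referenced by di_eattr: the direct EA block,
 * or the indirect block once ea_dealloc_indirect has emptied it.
 */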
1415 static int ea_dealloc_block(struct gfs2_inode *ip)
1417 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1418 struct gfs2_alloc *al = &ip->i_alloc;
1419 struct gfs2_rgrpd *rgd;
1420 struct buffer_head *dibh;
1423 rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
1425 gfs2_consist_inode(ip);
1429 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1434 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
1439 gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1441 ip->i_di.di_eattr = 0;
1442 if (!ip->i_di.di_blocks)
1443 gfs2_consist_inode(ip);
1444 ip->i_di.di_blocks--;
1446 error = gfs2_meta_inode_buffer(ip, &dibh);
1448 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1449 gfs2_dinode_out(ip, dibh->b_data);
1453 gfs2_trans_end(sdp);
1456 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1461 * gfs2_ea_dealloc - deallocate the extended attribute fork
1467 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1469 struct gfs2_alloc *al;
1472 al = gfs2_alloc_get(ip);
1474 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1478 error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
1482 error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1486 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
1487 error = ea_dealloc_indirect(ip);
1492 error = ea_dealloc_block(ip);
1495 gfs2_glock_dq_uninit(&al->al_ri_gh);
1497 gfs2_quota_unhold(ip);