/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

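/*
 * On-disk layout, for orientation (the authoritative definitions live
 * in gfs2_ondisk.h):
 *
 *   dinode --di_eattr--> EA block (GFS2_METATYPE_EA)
 *                        [meta header][ea_header|name|data][ea_header|...]
 *
 * A "stuffed" EA keeps its data right in the EA block; an unstuffed EA
 * instead follows its name with an array of ea_num_ptrs block pointers
 * and spills the data into those GFS2_METATYPE_ED blocks.  Once a
 * single EA block is not enough, di_eattr is switched to an indirect
 * block (GFS2_METATYPE_IN) that lists EA block numbers, and
 * GFS2_DIF_EA_INDIRECT is set in di_flags (see ea_set_block() below).
 */
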
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem
 * @er: the request
 * @size: the resulting size of the EA
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
                        unsigned int *size)
{
        *size = GFS2_EAREQ_SIZE_STUFFED(er);
        if (*size <= sdp->sd_jbsize)
                return 1;

        *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

        return 0;
}

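/*
 * Worked example (a sketch; the exact numbers are assumptions): with
 * 4KiB blocks, sd_jbsize is 4096 minus sizeof(struct gfs2_meta_header).
 * A request with a 7-byte name and 100 bytes of data needs roughly
 * sizeof(struct gfs2_ea_header) + 7 + 100 bytes, padded for alignment,
 * which is far below sd_jbsize, so it is stuffed and this returns 1.
 * A 64KiB value can never fit inline, so *size is recomputed as
 * header + name + pointer array via GFS2_EAREQ_SIZE_UNSTUFFED() and
 * 0 is returned.
 */
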
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
        unsigned int size;

        if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, er, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

typedef int (*ea_call_t) (struct gfs2_inode *ip,
                          struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev,
                          void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea &&
                      (char *)GFS2_EA2NEXT(ea) <=
                      bh->b_data + bh->b_size))
                        goto fail;
                if (!GFS2_EATYPE_VALID(ea->ea_type))
                        goto fail;

                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

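/*
 * Iteration contract, inferred from the callers below: an ea_call_t
 * callback returns 0 to keep walking, a negative errno to abort, or a
 * positive value to stop the walk early and have that value propagated.
 * ea_find_i() and ea_set_simple() both rely on the positive case to
 * report "found/handled" back through ea_foreach().
 */
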
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        uint64_t *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                               DIO_START | DIO_WAIT, &bh);
        if (error)
                return error;

        if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (uint64_t *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + ip->i_sbd->sd_inptrs;

        for (; eablk < end; eablk++) {
                uint64_t bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
                                       &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);

        return error;
}

struct ea_find {
        struct gfs2_ea_request *ef_er;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;
        struct gfs2_ea_request *er = ef->ef_er;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == er->er_type) {
                if (ea->ea_name_len == er->er_name_len &&
                    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }
#if 0
        else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
                 er->er_type == GFS2_EATYPE_SYS)
                return 1;
#endif

        return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                 struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.ef_er = er;
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}

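/*
 * Typical use (a hypothetical sketch): fill in a gfs2_ea_request with
 * er_type, er_name and er_name_len, then call gfs2_ea_find().  On a
 * hit, the gfs2_ea_location carries the buffer_head (referenced via
 * get_bh() in ea_find_i()), the matching header and its predecessor;
 * the caller owns that reference and must brelse(el.el_bh), as
 * gfs2_ea_get_i() and gfs2_ea_set_i() below do.
 */
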
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA
 * @ea: the EA header
 * @prev: the previous EA header in the block
 * @private: if set, just mark the EA unused instead of coalescing records
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        struct buffer_head *dibh;
        uint64_t *dataptrs, bn = 0;
        uint64_t bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length +
                                 RES_DINODE + RES_EATTR + RES_STATFS +
                                 RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        if (prev && !leave) {
                uint32_t len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);

        return error;
}

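/*
 * The bstart/blen pair above implements run coalescing: consecutive
 * block numbers are batched so gfs2_free_meta() is called once per
 * contiguous run, with the pending run flushed on each discontiguity
 * and once more after the loop.  ea_dealloc_indirect() below uses the
 * same idiom for the indirect pointer list.
 */
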
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev,
                                     (leave) ? &error : NULL);

        gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);

        return error;
}

struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size = gfs2_ea_strlen(ea);

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (er->er_data_len) {
                char *prefix = NULL;
                unsigned int l = 0;
                char c = 0;

                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                switch (ea->ea_type) {
                case GFS2_EATYPE_USR:
                        prefix = "user.";
                        l = 5;
                        break;
                case GFS2_EATYPE_SYS:
                        prefix = "system.";
                        l = 7;
                        break;
                case GFS2_EATYPE_SECURITY:
                        prefix = "security.";
                        l = 9;
                        break;
                }

                /* FIXME: Needs looking at again */

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
        }

        ei->ei_size += ea_size;

        return 0;
}

/**
 * gfs2_ea_list - list the names of an inode's extended attributes
 * @ip: the inode
 * @er: the request (er_data may be NULL to query the needed size)
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl,
                                   LM_ST_SHARED, LM_FLAG_ANY,
                                   &i_gh);
        if (error)
                return error;

        if (ip->i_di.di_eattr) {
                struct ea_list ei = { .ei_er = er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

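/*
 * The output follows the usual listxattr(2) convention: a run of
 * NUL-terminated, prefixed names, e.g. "user.foo\0security.selinux\0"
 * (hypothetical names).  ea_list_i() picks the prefix from ea_type
 * and terminates each entry with the one-byte memcpy of &c.  Calling
 * with er_data_len == 0 is the sizing pass: nothing is copied, only
 * ei_size accumulates, which is how a caller learns how big a buffer
 * to allocate.
 */
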
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: the inode
 * @ea: the EA header
 * @data: the request buffer
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                            char *data)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
        if (!bh)
                return -ENOMEM;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
                                       DIO_START, bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                memcpy(data,
                       bh[x]->b_data + sizeof(struct gfs2_meta_header),
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);

        return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                     char *data)
{
        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data,
                       GFS2_EA2DATA(el->el_ea),
                       GFS2_EA_DATA_LEN(el->el_ea));
                return 0;
        } else
                return ea_get_unstuffed(ip, el->el_ea, data);
}

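/*
 * Read-path summary: a stuffed EA costs one memcpy out of the EA
 * block already in hand, while an unstuffed EA pays one metadata read
 * per data block.  Note how ea_get_unstuffed() issues all of the
 * reads asynchronously (DIO_START) in a first pass and only waits for
 * them (gfs2_meta_reread() with DIO_WAIT) in a second, so the disk
 * requests can overlap.
 */
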
/**
 * gfs2_ea_get_i - get an extended attribute (caller holds the glock)
 * @ip: the inode
 * @er: the request
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (er->er_data_len) {
                if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
                        error = -ERANGE;
                else
                        error = gfs2_ea_get_copy(ip, &el, er->er_data);
        }
        if (!error)
                error = GFS2_EA_DATA_LEN(el.el_ea);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_get - get an extended attribute
 * @ip: the inode
 * @er: the request
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len ||
            er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl,
                                   LM_ST_SHARED, LM_FLAG_ANY,
                                   &i_gh);
        if (error)
                return error;

        error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_ea_header *ea;
        uint64_t block;

        block = gfs2_alloc_meta(ip);

        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        ip->i_di.di_blocks++;

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = ip->i_sbd;

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;
        ea->__pad = 0;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                uint64_t *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        uint64_t block;
                        int mh_size = sizeof(struct gfs2_meta_header);

                        block = gfs2_alloc_meta(ip);

                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

                        ip->i_di.di_blocks++;

                        copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
                                                             data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64((uint64_t)bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}

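/*
 * Example (a sketch; the block size is an assumption): with 4KiB
 * blocks and sd_jbsize of roughly 4072 bytes, a 10000-byte value
 * gives ea_num_ptrs = DIV_ROUND_UP(10000, 4072) = 3.  The loop
 * allocates three GFS2_METATYPE_ED blocks, copies a full sd_jbsize
 * into the first two and the remainder into the third (zero-padding
 * its tail), and records each block number big-endian in the pointer
 * array that follows the name in the EA record.
 */
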
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er,
                                   void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call,
                             void *private)
{
        struct gfs2_alloc *al;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
        if (error)
                goto out_gunlock_q;

        al->al_requested = blks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(ip->i_sbd,
                                 blks + al->al_rgd->rd_ri.ri_length +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                if (er->er_flags & GFS2_ERF_MODE) {
                        gfs2_assert_withdraw(ip->i_sbd,
                                             (ip->i_di.di_mode & S_IFMT) ==
                                             (er->er_mode & S_IFMT));
                        ip->i_di.di_mode = er->er_mode;
                }
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

out_end_trans:
        gfs2_trans_end(ip->i_sbd);

out_ipres:
        gfs2_inplace_release(ip);

out_gunlock_q:
        gfs2_quota_unlock(ip);

out:
        gfs2_alloc_put(ip);

        return error;
}

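/*
 * ea_alloc_skeleton() is the shared wrapper for every allocating
 * write: quota lock -> quota check -> block reservation -> transaction
 * -> type-specific skeleton_call -> dinode update, with the matching
 * teardown in reverse order on the error paths.  ea_init_i(),
 * ea_set_simple_alloc() and ea_set_block() below supply only the
 * middle step.
 */
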
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_di.di_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        unsigned int jbsize = ip->i_sbd->sd_jbsize;
        unsigned int blks = 1;

        if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}

static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        uint32_t ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        uint32_t new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}

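/*
 * Split example (hypothetical numbers): an EA occupying 96 bytes of a
 * 256-byte record has GFS2_EA_SIZE(ea) == 96 and GFS2_EA_REC_LEN(ea)
 * == 256.  ea_split_ea() trims the old record to exactly 96 bytes and
 * returns a fresh header at ea + 96 covering the 160 bytes of slack,
 * which the callers immediately hand to ea_write().  The ea_flags ^=
 * last trick clears GFS2_EAFLAG_LAST on the old record only if it was
 * set, and the new, physically last record inherits it.
 */
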
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        uint32_t len;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(ip->i_sbd, GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        if (er->er_flags & GFS2_ERF_MODE) {
                gfs2_assert_withdraw(ip->i_sbd,
                        (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
                ip->i_di.di_mode = er->er_mode;
        }
        ip->i_di.di_ctime = get_seconds();
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(&ip->i_di, dibh->b_data);
        brelse(dibh);
out:
        gfs2_trans_end(ip->i_sbd);

        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                         void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(ip->i_sbd, es->es_er, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        ip->i_sbd->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

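/*
 * Decision summary for ea_set_simple(): either reuse a
 * GFS2_EATYPE_UNUSED record that is large enough (freeing any
 * unstuffed data blocks it still references first), or split the
 * slack off a live record with at least size bytes to spare.
 * Returning 1 tells ea_foreach() the set was handled in place;
 * returning 0 keeps scanning; only when the whole chain is scanned
 * without a hit does ea_set_i() fall through to ea_set_block() to
 * grow the EA fork.
 */
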
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct buffer_head *indbh, *newbh;
        uint64_t *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
                uint64_t *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                                       DIO_START | DIO_WAIT, &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (uint64_t *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
        } else {
                uint64_t blk;

                blk = gfs2_alloc_meta(ip);

                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (uint64_t *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_di.di_eattr);
                ip->i_di.di_eattr = blk;
                ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
                ip->i_di.di_blocks++;

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((uint64_t)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);

out:
        brelse(indbh);

        return error;
}

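/*
 * Growth example: the first time ea_set_block() runs on an inode with
 * a direct EA block, it allocates an indirect block, stores the old
 * EA block number in slot 0, points di_eattr at the indirect block
 * and sets GFS2_DIF_EA_INDIRECT; every later call just claims the
 * next free be64 slot, up to sd_inptrs of them, after which the set
 * fails with -ENOSPC.
 */
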
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                    struct gfs2_ea_location *el)
{
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(er) > ip->i_sbd->sd_jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, ip->i_sbd->sd_jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(ip->i_sbd,
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr) {
                if (er->er_flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, er);
        }

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(er->er_flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, er, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
        } else {
                error = -ENODATA;
                if (!(er->er_flags & XATTR_REPLACE))
                        error = ea_set_i(ip, er, NULL);
        }

        return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len ||
            er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }
        error = ea_check_size(ip->i_sbd, er);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(ip->i_vnode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

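/*
 * Entry-point convention: gfs2_ea_set() takes the inode glock
 * exclusively and does the generic checks (name length, request size,
 * immutability), then dispatches through gfs2_ea_ops[er->er_type] so
 * the user/system/security handlers can wrap gfs2_ea_set_i() with
 * their own permission logic.  gfs2_ea_get() and gfs2_ea_remove()
 * mirror this shape.
 */
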
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (prev) {
                uint32_t len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else
                ea->ea_type = GFS2_EATYPE_UNUSED;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_di.di_ctime = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(ip->i_sbd);

        return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_di.di_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
                                            0);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */

int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_header *ea, char *data)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
        if (!bh)
                return -ENOMEM;

        error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
        if (error)
                goto out;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
                                       DIO_START, bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto fail;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto fail;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto fail;
                }

                gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

                memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header),
                       data,
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

fail:
        gfs2_trans_end(sdp);
out:
        kfree(bh);

        return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                      struct iattr *attr, char *data)
{
        struct buffer_head *dibh;
        int error;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
                if (error)
                        return error;

                gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
                memcpy(GFS2_EA2DATA(el->el_ea),
                       data,
                       GFS2_EA_DATA_LEN(el->el_ea));
        } else
                error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

        if (error)
                return error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(ip->i_vnode, attr);
                gfs2_assert_warn(ip->i_sbd, !error);
                gfs2_inode_attr_out(ip);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(ip->i_sbd);

        return error;
}

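/*
 * gfs2_ea_acl_chmod() exists because POSIX ACLs are stored in system
 * EAs: a chmod that rewrites the ACL must update the EA data and the
 * inode attributes inside the same transaction so the two cannot
 * diverge on disk.  Note that ea_acl_chmod_unstuffed() opens its own
 * transaction, while the stuffed case opens one here.
 */
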
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
        uint64_t *eablk, *end;
        unsigned int rg_blocks = 0;
        uint64_t bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
                               DIO_START | DIO_WAIT, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                uint64_t bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(sdp, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(sdp, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd;
                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
                rg_blocks += rgd->rd_ri.ri_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
                                 RES_INDIRECT + RES_STATFS +
                                 RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, indbh, 1);

        eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;

        for (; eablk < end; eablk++) {
                uint64_t bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *eablk = 0;
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);

out_rlist_free:
        gfs2_rlist_free(&rlist);

out:
        brelse(indbh);

        return error;
}

static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        int error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE +
                                 RES_STATFS + RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

        ip->i_di.di_eattr = 0;
        if (!ip->i_di.di_blocks)
                gfs2_consist_inode(ip);
        ip->i_di.di_blocks--;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);

        return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_rindex;

        if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_rindex;
        }

        error = ea_dealloc_block(ip);

out_rindex:
        gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
        gfs2_quota_unhold(ip);

out_alloc:
        gfs2_alloc_put(ip);

        return error;
}

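/*
 * Teardown order here matters: ea_foreach(ea_dealloc_unstuffed) frees
 * the leaf data blocks first, ea_dealloc_indirect() then frees the EA
 * blocks named by the indirect block, and ea_dealloc_block() finally
 * releases the block di_eattr itself points at, so di_blocks stays
 * consistent and no pass frees a block that a later pass still needs
 * to read.
 */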