/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
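/*
 * Extended attribute (EA) support for GFS2.  An EA small enough to fit
 * in a journaled data block is "stuffed" immediately after its header;
 * larger values are "unstuffed" into separate data blocks referenced by
 * an array of block pointers following the header.
 */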
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the superblock
 * @er: the request
 * @size: returns the on-disk size of the EA record
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}
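/**
 * ea_check_size - check that an EA request is small enough to store on disk
 * @sdp: the superblock
 * @er: the request
 *
 * Returns: 0 if the request fits, -ERANGE otherwise
 */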
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}
typedef int (*ea_call_t) (struct gfs2_inode *ip,
			  struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev,
			  void *private);
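/*
 * ea_call_t callbacks return a negative errno on failure, zero to keep
 * iterating, or a positive value to stop the walk successfully.
 */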
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea &&
		      (char *)GFS2_EA2NEXT(ea) <=
		      bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

 fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
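/**
 * ea_foreach - call a function for each EA in the inode's EA blocks
 * @ip: the inode
 * @ea_call: the function to call for each EA header
 * @data: opaque pointer passed through to @ea_call
 *
 * Handles both a single direct EA block and, when GFS2_DIF_EA_INDIRECT
 * is set, an indirect block full of pointers to EA blocks.
 *
 * Returns: errno
 */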
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	uint64_t *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
			       DIO_START | DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(ip->i_sbd, bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (uint64_t *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + ip->i_sbd->sd_inptrs;

	for (; eablk < end; eablk++) {
		uint64_t bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_START | DIO_WAIT,
				       &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}

 out:
	brelse(bh);

	return error;
}
struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

#if 0
	else if ((ip->i_di.di_flags & GFS2_DIF_EA_PACKED) &&
		 er->er_type == GFS2_EATYPE_SYS)
		return 1;
#endif

	return 0;
}
int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		error = 0;

	return error;
}
/**
 * ea_dealloc_unstuffed - free the data blocks backing an unstuffed EA
 * @ip: the inode
 * @bh: the EA block's buffer
 * @ea: the EA to deallocate
 * @prev: the previous EA in the block
 * @private: if non-NULL, leave the EA header in place, marked unused
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	uint64_t *dataptrs, bn = 0;
	uint64_t bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++)
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length +
				 RES_DINODE + RES_EATTR + RES_STATFS +
				 RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		uint32_t len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

 out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);

	return error;
}
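/**
 * ea_remove_unstuffed - grab the resources needed to free an unstuffed EA
 * @ip: the inode
 * @bh: the EA block's buffer
 * @ea: the EA to remove
 * @prev: the previous EA in the block (may be NULL)
 * @leave: if set, mark the header unused rather than folding it into
 *         @prev, so the record can be reused
 *
 * Acquires the quota and rindex holds that ea_dealloc_unstuffed()
 * needs, then calls it.
 *
 * Returns: errno
 */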
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip,
				     bh, ea, prev,
				     (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

 out_quota:
	gfs2_quota_unhold(ip);

 out_alloc:
	gfs2_alloc_put(ip);

	return error;
}
static int gfs2_ea_repack_i(struct gfs2_inode *ip)
{
	return -EOPNOTSUPP;
}

int gfs2_ea_repack(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	/* Some sort of permissions checking would be nice */

	error = gfs2_ea_repack_i(ip);

	gfs2_glock_dq_uninit(&gh);

	return error;
}
struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
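/**
 * ea_list_i - copy one EA name into the request buffer
 * @ip: the inode
 * @bh: the EA block's buffer
 * @ea: the current EA header
 * @prev: the previous EA header
 * @private: the struct ea_list accumulating names and sizes
 *
 * Each name is emitted with its "user." or "system." prefix and a
 * terminating NUL, the format expected by listxattr(2).
 *
 * Returns: errno
 */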
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = GFS2_EA_STRLEN(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix;
		unsigned int l;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		if (ea->ea_type == GFS2_EATYPE_USR) {
			prefix = "user.";
			l = 5;
		} else {
			prefix = "system.";
			l = 7;
		}

		memcpy(er->er_data + ei->ei_size,
		       prefix, l);
		memcpy(er->er_data + ei->ei_size + l,
		       GFS2_EA2NAME(ea), ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size +
		       ea_size - 1,
		       &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
/**
 * gfs2_ea_list - list the names of the EAs on an inode
 * @ip: the inode
 * @er: the request
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_SHARED, LM_FLAG_ANY,
				   &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: the inode
 * @ea: the EA header
 * @data: the destination buffer
 *
 * Returns: errno
 */
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
				       DIO_START, bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data,
		       bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

 out:
	kfree(bh);

	return error;
}
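/**
 * gfs2_ea_get_copy - copy an EA's value into a buffer
 * @ip: the inode
 * @el: the location of the EA
 * @data: the destination buffer, assumed large enough for the value
 *
 * Returns: errno
 */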
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data,
		       GFS2_EA2DATA(el->el_ea),
		       GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}
/**
 * gfs2_ea_get_i - read an EA; the inode glock must already be held
 * @ip: the inode
 * @er: the request
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_get - read an EA, taking the inode glock
 * @ip: the inode
 * @er: the request
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_SHARED, LM_FLAG_ANY,
				   &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_ea_header *ea;
	uint64_t block;

	block = gfs2_alloc_meta(ip);

	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;

	return 0;
}
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = ip->i_sbd;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		uint64_t *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			uint64_t block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_meta(ip);

			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;

			copy = (data_len > sdp->sd_jbsize) ? sdp->sd_jbsize :
							     data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64((uint64_t)bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er,
				   void *private);
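/**
 * ea_alloc_skeleton - common setup for EA operations that allocate blocks
 * @ip: the inode
 * @er: the request
 * @blks: the number of blocks to reserve
 * @skeleton_call: the operation to run inside the transaction
 * @private: opaque pointer passed to @skeleton_call
 *
 * Handles quota locking and checking, block reservation, and the
 * transaction around @skeleton_call, then updates the dinode.
 *
 * Returns: errno
 */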
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call,
			     void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(ip->i_sbd,
				 blks + al->al_rgd->rd_ri.ri_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(ip->i_sbd,
					     (ip->i_di.di_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_di.di_mode = er->er_mode;
		}
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

 out_end_trans:
	gfs2_trans_end(ip->i_sbd);

 out_ipres:
	gfs2_inplace_release(ip);

 out_gunlock_q:
	gfs2_quota_unlock(ip);

 out:
	gfs2_alloc_put(ip);

	return error;
}
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}
/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request
 *
 * Returns: errno
 */
static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = ip->i_sbd->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
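/*
 * ea_split_ea - split the free tail off an EA record.  Shrinks @ea's
 * record to its used size and turns the remainder into a new record,
 * transferring the GFS2_EAFLAG_LAST flag if @ea carried it.  Returns
 * the new record.
 */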
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	uint32_t ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	uint32_t new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
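/*
 * ea_set_remove_stuffed - remove a stuffed EA that is being replaced,
 * either by marking its record unused or by folding it into the
 * previous record's length.
 */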
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	uint32_t len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(ip->i_sbd, GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};
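/*
 * ea_set_simple_noalloc - write a stuffed EA into space already
 * available in an existing EA block; no new blocks are needed, so only
 * a small transaction is required.
 */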
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(ip->i_sbd,
			(ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_di.di_mode = er->er_mode;
	}
	ip->i_di.di_ctime = get_seconds();
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

 out:
	gfs2_trans_end(ip->i_sbd);

	return error;
}
static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
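/*
 * ea_set_simple - ea_foreach() callback that looks for enough space to
 * store the request in an existing EA block: either an unused record
 * that is big enough, or the free tail of a live record (which is then
 * split).  Returns 1 once the EA has been written, 0 to keep looking.
 */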
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(ip->i_sbd, es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					ip->i_sbd->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
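/*
 * ea_set_block - no existing EA block had room, so allocate a new one.
 * The first time this happens, the single EA block is converted to an
 * indirect block (GFS2_DIF_EA_INDIRECT) with the old block as its first
 * pointer; after that, new EA blocks are added to free slots in the
 * indirect block.
 */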
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head *indbh, *newbh;
	uint64_t *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		uint64_t *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
				       DIO_START | DIO_WAIT, &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (uint64_t *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		uint64_t blk;

		blk = gfs2_alloc_meta(ip);

		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (uint64_t *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((uint64_t)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, (struct gfs2_ea_location *)private);

 out:
	brelse(indbh);

	return error;
}
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > ip->i_sbd->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, ip->i_sbd->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(ip->i_sbd,
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
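/*
 * gfs2_ea_set_i - set an EA with the inode glock already held (see
 * gfs2_ea_set()), honoring the XATTR_CREATE and XATTR_REPLACE flags.
 * If an unstuffed EA is replaced, its old data blocks are freed
 * afterwards by ea_set_remove_unstuffed().
 */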
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}
int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(ip->i_sbd, er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(ip->i_vnode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
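/*
 * ea_remove_stuffed - remove a stuffed EA, either by folding its record
 * into the previous record or, if it is first in the block, by marking
 * it unused.
 */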
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		uint32_t len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(ip->i_sbd);

	return error;
}
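/*
 * gfs2_ea_remove_i - remove an EA with the inode glock already held;
 * dispatches to the stuffed or unstuffed removal path.
 */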
int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}
/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
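/*
 * ea_acl_chmod_unstuffed - rewrite an unstuffed ACL EA's data blocks in
 * place during a chmod.  Note: on success the transaction is left open
 * for gfs2_ea_acl_chmod() to finish after it updates the dinode.
 */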
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	uint64_t *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs),
				       DIO_START, bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_reread(sdp, bh[x], DIO_WAIT);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

	error = 0;

 out:
	kfree(bh);
	return error;

 fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(ip->i_sbd, RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea),
		       data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(ip->i_vnode, attr);
		gfs2_assert_warn(ip->i_sbd, !error);
		gfs2_inode_attr_out(ip);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(ip->i_sbd);

	return error;
}
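/*
 * ea_dealloc_indirect - free every EA block referenced by the indirect
 * block.  A first pass builds the list of resource groups involved so
 * they can all be locked before the transaction starts; a second pass
 * does the actual freeing, batching runs of contiguous blocks.
 */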
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	uint64_t *eablk, *end;
	unsigned int rg_blocks = 0;
	uint64_t bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr,
			       DIO_START | DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		uint64_t bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}

		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_ri.ri_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS +
				 RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (uint64_t *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		uint64_t bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

 out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);

 out_rlist_free:
	gfs2_rlist_free(&rlist);

 out:
	brelse(indbh);

	return error;
}
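/*
 * ea_dealloc_block - free the inode's one remaining EA block and clear
 * di_eattr.
 */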
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE +
				 RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	if (!ip->i_di.di_blocks)
		gfs2_consist_inode(ip);
	ip->i_di.di_blocks--;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

 out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);

	return error;
}
/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(ip->i_sbd, &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

 out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);

 out_quota:
	gfs2_quota_unhold(ip);

 out_alloc:
	gfs2_alloc_put(ip);

	return error;
}