/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
        tree->keycmp = keycmp;

        tree->inode = iget_locked(sb, id);
        if (!tree->inode)
                goto free_tree;
        BUG_ON(!(tree->inode->i_state & I_NEW));
        {
        struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
        HFS_I(tree->inode)->flags = 0;
        init_MUTEX(&HFS_I(tree->inode)->extents_lock);
        switch (id) {
        case HFS_EXT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
                                    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        case HFS_CAT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
                                    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        default:
                BUG();
        }
        }
        unlock_new_inode(tree->inode);

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_tree;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

 fail_page:
        tree->inode->i_mapping->a_ops = &hfs_aops;
        page_cache_release(page);
 free_tree:
        iput(tree->inode);
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

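/* Write the in-memory header fields back into the header record of node 0 */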
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

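/* Create a new map node at @idx and link it after @prev in the map node chain */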
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        if (!tree->free_nodes)
                panic("FIXME!!!");
        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

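/* Allocate a node from the tree's allocation bitmap, growing the tree file when no free nodes are left */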
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfs_extend_file(inode);
                if (res)
                        return ERR_PTR(res);
                HFS_I(inode)->phys_size = inode->i_size =
                                (loff_t)HFS_I(inode)->alloc_blocks *
                                HFS_SB(tree->sb)->alloc_blksz;
                HFS_I(inode)->fs_blocks = inode->i_size >>
                                          tree->sb->s_blocksize_bits;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off);

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                /* find the first clear bit in this byte and claim it */
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                /* this map record is full, move on to the next map node */
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

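/* Clear the allocation bit for @node in the tree's map records */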
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        /* walk the map node chain until we reach the record covering bit nidx */
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        return;
                }
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}