/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
        tree->inode = iget(sb, id);
        if (!tree->inode)
                goto free_tree;

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_tree;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        /* Set the correct compare function */
        if (id == HFSPLUS_EXT_CNID) {
                tree->keycmp = hfsplus_ext_cmp_key;
        } else if (id == HFSPLUS_CAT_CNID) {
                if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else {
                        tree->keycmp = hfsplus_cat_case_cmp_key;
                        HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
                }
        } else {
                printk(KERN_ERR "hfs: unknown B*Tree requested\n");
                goto fail_page;
        }

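        /* Sanity check the on-disk geometry and derive node-size shift / pages per node */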
        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        tree->node_size_shift = ffs(size) - 1;

        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

 fail_page:
        kunmap(page);
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        page_cache_release(page);
 free_tree:
        iput(tree->inode);
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

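/* Write the in-memory header fields back to the tree's header node (node 0) */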
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

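/* Create a new map (bitmap) node at @idx and chain it after @prev */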
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
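        /*
         * Record 0 is the map: set its first bit to mark this map node itself
         * as allocated, then write the record offset table at the end of the node.
         */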
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

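/* Allocate a node from the B-tree allocation bitmap, extending the tree file if necessary */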
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

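        /* No free nodes left: grow the tree file until new node slots appear */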
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
                HFSPLUS_I(inode).phys_size = inode->i_size =
                                (loff_t)HFSPLUS_I(inode).alloc_blocks <<
                                HFSPLUS_SB(tree->sb).alloc_blksz_shift;
                HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
                                             HFSPLUS_SB(tree->sb).fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off);

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

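        /* Scan the map records for a clear bit; each clear bit is a free node index */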
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

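/* Clear the node's bit in the allocation bitmap, returning it to the free pool */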
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
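        /* Walk the chain of map nodes until we reach the one covering nidx */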
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}