/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/version.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

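/* Build a key for the extents tree from cnid, start block and fork type */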
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

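/* Map a block offset within an extent record to an allocation block */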
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

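/* Count the allocation blocks covered by all eight extents of a record */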
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

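/* Return the allocation block just past the last extent of a record */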
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

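/* Write the cached extent record back to the tree, inserting it if new */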
static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
			      HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	res = hfs_brec_find(fd);
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
		HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
	}
}

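/* Flush the cached extent record to the extents tree if it is dirty */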
void hfsplus_ext_write_extent(struct inode *inode)
{
	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
		struct hfs_find_data fd;

		hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

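/* Read the extent record covering 'block' for the given cnid and fork */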
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
	return 0;
}

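/* Cache the extent record covering 'block', writing back a dirty one first */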
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
	if (!res) {
		HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
		HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
	} else {
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
	}
	return res;
}

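/* Make sure the cached extent record covers 'block', reading it if needed */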
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFSPLUS_I(inode).cached_start &&
	    block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
	res = __hfsplus_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	int res = -EIO;
	u32 ablock, dblock, mask;
	int shift;

	sb = inode->i_sb;

	/* Convert inode block to disk allocation block */
	shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> HFSPLUS_SB(sb).fs_shift;

	if (iblock >= HFSPLUS_I(inode).fs_blocks) {
		if (iblock > HFSPLUS_I(inode).fs_blocks || !create)
			return -EIO;
		if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
			res = hfsplus_file_extend(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFSPLUS_I(inode).first_blocks) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
		goto done;
	}

	down(&HFSPLUS_I(inode).extents_lock);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (!res) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
						HFSPLUS_I(inode).cached_start);
	} else {
		up(&HFSPLUS_I(inode).extents_lock);
		return -EIO;
	}
	up(&HFSPLUS_I(inode).extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (unsigned long long)iblock, dblock);
	mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
	map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
	if (create) {
		set_buffer_new(bh_result);
		HFSPLUS_I(inode).phys_size += sb->s_blocksize;
		HFSPLUS_I(inode).fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

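/* Dump the eight start:count pairs of an extent record for debugging */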
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, " ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block),
				 be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}

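/* Add a run of block_count blocks at alloc_block to an extent record */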
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

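/* Free the last block_nr of the first 'offset' blocks of an extent record */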
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

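/* Free all blocks of a fork, including its extents overflow records */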
int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

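/* Allocate another clump of blocks, preferably adjacent to the last extent */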
int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
		// extend alloc file
		printk("extend alloc file! (%Lu,%u,%u)\n",
			(unsigned long long)HFSPLUS_SB(sb).alloc_file->i_size * 8,
			HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
		return -ENOSPC;
		//BUG();
	}

	down(&HFSPLUS_I(inode).extents_lock);
	if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
	}

	len = HFSPLUS_I(inode).clump_blocks;
	start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
	if (start >= HFSPLUS_SB(sb).total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
		if (!HFSPLUS_I(inode).first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
			HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
						 HFSPLUS_I(inode).alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
					 HFSPLUS_I(inode).alloc_blocks -
					 HFSPLUS_I(inode).cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			HFSPLUS_I(inode).cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	up(&HFSPLUS_I(inode).extents_lock);
	if (!res) {
		HFSPLUS_I(inode).alloc_blocks += len;
		mark_inode_dirty(inode);
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfsplus_ext_write_extent(inode);

	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
	HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
	HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
	HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
	HFSPLUS_I(inode).cached_blocks = len;

	res = 0;
	goto out;
}

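/* Adjust the allocation of a file to match inode->i_size */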
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (unsigned long long)HFSPLUS_I(inode).phys_size,
	       (unsigned long long)inode->i_size);
	if (inode->i_size > HFSPLUS_I(inode).phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		u32 size = inode->i_size - 1;
		int res;

		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		size &= PAGE_CACHE_SIZE - 1;
		size++;
		res = mapping->a_ops->prepare_write(NULL, page, size, size);
		if (!res)
			res = mapping->a_ops->commit_write(NULL, page, size, size);
		if (res)
			inode->i_size = HFSPLUS_I(inode).phys_size;
		unlock_page(page);
		page_cache_release(page);
		mark_inode_dirty(inode);
		return;
	}
	blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	down(&HFSPLUS_I(inode).extents_lock);
	hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
	while (1) {
		if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
			hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
			HFSPLUS_I(inode).first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFSPLUS_I(inode).cached_start;
		hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
		if (blk_cnt > start) {
			HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
		HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	up(&HFSPLUS_I(inode).extents_lock);

	HFSPLUS_I(inode).alloc_blocks = blk_cnt;
out:
	HFSPLUS_I(inode).phys_size = inode->i_size;
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}