5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
9 #include <linux/string.h>
10 #include <linux/slab.h>
11 #include <linux/ufs_fs.h>
12 #include <linux/buffer_head.h>
/*
 * Allocate a new ufs_buffer_head and read @size bytes starting at
 * @fragment into it, one buffer_head per fragment.
 *
 * @uspi:     per-superblock private info (fragment size/shift masks)
 * @sb:       superblock to read from
 * @fragment: first fragment number to read
 * @size:     total bytes to read; must be a whole number of fragments
 *
 * Returns the new ufs_buffer_head, or NULL on bad size, allocation
 * failure, or read error (error paths partially elided in this view).
 */
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
	struct ufs_buffer_head * ubh;
	/* reject sizes that are not fragment-aligned */
	if (size & ~uspi->s_fmask)
	count = size >> uspi->s_fshift;
	/* NOTE(review): unlike ubh_bread_uspi() below, count == 0 is not
	 * rejected here -- confirm callers never pass size == 0 */
	if (count > UFS_MAXFRAG)
	ubh = (struct ufs_buffer_head *)
		kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
	ubh->fragment = fragment;
	/* read each fragment-sized block; bail out on the first failure */
	for (i = 0; i < count; i++)
		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
	/* NULL-terminate the unused tail of the bh[] array */
	for (; i < UFS_MAXFRAG; i++)
	/* failure path: release the buffers read so far */
	for (j = 0; j < i; j++)
/*
 * Like _ubh_bread_(), but fills in the ufs_buffer_head embedded in
 * @uspi (via USPI_UBH) instead of allocating a new one.
 *
 * Returns USPI_UBH(uspi) on success, or NULL on bad size / read error
 * (error returns partially elided in this view).
 */
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
	/* reject sizes that are not fragment-aligned */
	if (size & ~uspi->s_fmask)
	count = size >> uspi->s_fshift;
	/* here (unlike _ubh_bread_) a zero fragment count is rejected too */
	if (count <= 0 || count > UFS_MAXFRAG)
	USPI_UBH(uspi)->fragment = fragment;
	USPI_UBH(uspi)->count = count;
	/* read each fragment-sized block; bail out on the first failure */
	for (i = 0; i < count; i++)
		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
	/* NULL-terminate the unused tail of the bh[] array */
	for (; i < UFS_MAXFRAG; i++)
		USPI_UBH(uspi)->bh[i] = NULL;
	return USPI_UBH(uspi);
	/* failure path: release the buffers read so far */
	for (j = 0; j < i; j++)
		brelse (USPI_UBH(uspi)->bh[j]);
/* Release (brelse) every buffer_head held by @ubh. */
void ubh_brelse (struct ufs_buffer_head * ubh)
	for (i = 0; i < ubh->count; i++)
/*
 * Release every buffer_head held by the ufs_buffer_head embedded in
 * @uspi, clearing each slot so it cannot be released twice.
 */
void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
	for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
		brelse (USPI_UBH(uspi)->bh[i]);
		USPI_UBH(uspi)->bh[i] = NULL;
/* Mark every buffer_head in @ubh dirty. */
void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
	for ( i = 0; i < ubh->count; i++ )
		mark_buffer_dirty (ubh->bh[i]);
/*
 * Set (if @flag is non-zero) or clear (otherwise) the uptodate bit on
 * every buffer_head in @ubh.
 */
void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
	for ( i = 0; i < ubh->count; i++ )
		set_buffer_uptodate (ubh->bh[i]);
	/* else branch (flag == 0): clear instead of set */
	for ( i = 0; i < ubh->count; i++ )
		clear_buffer_uptodate (ubh->bh[i]);
/* Submit all of @ubh's buffers for I/O in direction @rw (READ/WRITE). */
void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
	ll_rw_block(rw, ubh->count, ubh->bh);
/* Wait for in-flight I/O to complete on every buffer_head in @ubh. */
void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
	for ( i = 0; i < ubh->count; i++ )
		wait_on_buffer (ubh->bh[i]);
/*
 * Forget (discard without writing back) every non-NULL buffer_head in
 * @ubh; NULL slots are skipped.
 */
void ubh_bforget (struct ufs_buffer_head * ubh)
	for ( i = 0; i < ubh->count; i++ ) if ( ubh->bh[i] )
		bforget (ubh->bh[i]);
/* Return non-zero if any buffer_head in @ubh is dirty. */
int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
	for ( i = 0; i < ubh->count; i++ )
		/* OR the per-buffer dirty bits into the result */
		result |= buffer_dirty(ubh->bh[i]);
/*
 * Copy up to @size bytes out of @ubh's buffers into the flat buffer
 * @mem, one fragment at a time.
 */
void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
	unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
	/* clamp the request to the data actually held by ubh */
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	/* per-iteration chunk: at most one fragment, less on the tail */
	len = min_t(unsigned int, size, uspi->s_fsize);
	memcpy (mem, ubh->bh[bhno]->b_data, len);
	/* advance by a full fragment; loop control is elided in this view */
	mem += uspi->s_fsize;
/*
 * Mirror of _ubh_ubhcpymem_(): copy up to @size bytes from the flat
 * buffer @mem into @ubh's buffers, one fragment at a time.
 */
void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
	/* clamp the request to the space actually held by ubh */
	if (size > (ubh->count << uspi->s_fshift))
		size = ubh->count << uspi->s_fshift;
	/* per-iteration chunk: at most one fragment, less on the tail */
	len = min_t(unsigned int, size, uspi->s_fsize);
	memcpy (ubh->bh[bhno]->b_data, mem, len);
	/* advance by a full fragment; loop control is elided in this view */
	mem += uspi->s_fsize;
/*
 * Decode the device number stored in an on-disk UFS inode into a
 * kernel dev_t. SUNx86-flavoured filesystems keep it in i_data[1],
 * all other flavours in i_data[0]; the Sun variants then distinguish
 * old 16-bit encodings from SysV 32-bit ones by the high 16 bits.
 */
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
	/* SUNx86 stores the dev number in the second data word */
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]);
	/* all other flavours use the first data word */
		fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]);
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
		/* high 16 bits all-zero or all-one => old 16-bit encoding */
		if ((fs32 & 0xffff0000) == 0 ||
		    (fs32 & 0xffff0000) == 0xffff0000)
			dev = old_decode_dev(fs32 & 0x7fff);
		/* otherwise a SysV-style 32-bit major/minor encoding */
			dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
		/* default flavour: plain old 16-bit encoding */
		dev = old_decode_dev(fs32);
/*
 * Encode a kernel dev_t into the on-disk UFS inode representation,
 * the inverse of ufs_get_inode_dev(). Sun flavours prefer the SysV
 * 32-bit encoding but fall back to the old 16-bit one when the device
 * number fits; SUNx86 writes to i_data[1], all others to i_data[0].
 */
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
		fs32 = sysv_encode_dev(dev);
		/* small enough for the old 16-bit encoding? use it instead */
		if ((fs32 & 0xffff8000) == 0) {
			fs32 = old_encode_dev(dev);
		/* default flavour: always the old 16-bit encoding */
		fs32 = old_encode_dev(dev);
	/* SUNx86 stores the dev number in the second data word */
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, fs32);
	/* all other flavours use the first data word */
		ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
 * ufs_get_locked_page() - locate, pin and lock a pagecache page, reading it in if absent
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache page; if it does not yet exist, it is
 * read from disk. The page is then locked, its reference count is
 * incremented, and its address is returned.
248 struct page *ufs_get_locked_page(struct address_space *mapping,
253 page = find_lock_page(mapping, index);
255 page = read_mapping_page(mapping, index, NULL);
258 printk(KERN_ERR "ufs_change_blocknr: "
259 "read_mapping_page error: ino %lu, index: %lu\n",
260 mapping->host->i_ino, index);
266 if (unlikely(page->mapping == NULL)) {
267 /* Truncate got there first */
269 page_cache_release(page);
274 if (!PageUptodate(page) || PageError(page)) {
276 page_cache_release(page);
278 printk(KERN_ERR "ufs_change_blocknr: "
279 "can not read page: ino %lu, index: %lu\n",
280 mapping->host->i_ino, index);
282 page = ERR_PTR(-EIO);