/*
 *  linux/fs/ufs/util.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ufs_fs.h>
#include <linux/buffer_head.h>

#include "swab.h"
#include "util.h"

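/*
 * A struct ufs_buffer_head describes one UFS block as the array of
 * buffer_heads covering the fragments that make it up.  _ubh_bread_()
 * allocates such a descriptor and reads @size bytes (a whole number of
 * fragments, at most UFS_MAXFRAG of them) starting at @fragment.
 */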
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
        struct super_block *sb, u64 fragment, u64 size)
{
        struct ufs_buffer_head * ubh;
        unsigned i, j;
        u64 count = 0;
        if (size & ~uspi->s_fmask)
                return NULL;
        count = size >> uspi->s_fshift;
        if (count > UFS_MAXFRAG)
                return NULL;
        ubh = (struct ufs_buffer_head *)
                kmalloc (sizeof (struct ufs_buffer_head), GFP_KERNEL);
        if (!ubh)
                return NULL;
        ubh->fragment = fragment;
        ubh->count = count;
        for (i = 0; i < count; i++)
                if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        for (; i < UFS_MAXFRAG; i++)
                ubh->bh[i] = NULL;
        return ubh;
failed:
        for (j = 0; j < i; j++)
                brelse (ubh->bh[j]);
        kfree(ubh);
        return NULL;
}

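/*
 * Same as _ubh_bread_(), but instead of allocating a new descriptor this
 * fills in the ufs_buffer_head embedded in the superblock private info
 * (USPI_UBH(uspi)).
 */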
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
        struct super_block *sb, u64 fragment, u64 size)
{
        unsigned i, j;
        u64 count = 0;
        if (size & ~uspi->s_fmask)
                return NULL;
        count = size >> uspi->s_fshift;
        if (count <= 0 || count > UFS_MAXFRAG)
                return NULL;
        USPI_UBH(uspi)->fragment = fragment;
        USPI_UBH(uspi)->count = count;
        for (i = 0; i < count; i++)
                if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        for (; i < UFS_MAXFRAG; i++)
                USPI_UBH(uspi)->bh[i] = NULL;
        return USPI_UBH(uspi);
failed:
        for (j = 0; j < i; j++)
                brelse (USPI_UBH(uspi)->bh[j]);
        return NULL;
}

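/*
 * The helpers below apply the usual buffer_head operations (brelse,
 * mark_buffer_dirty, set/clear_buffer_uptodate, ll_rw_block,
 * wait_on_buffer, bforget, buffer_dirty) to every fragment of a
 * ufs_buffer_head in turn.  ubh_brelse() also frees the descriptor
 * allocated by _ubh_bread_().
 */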
void ubh_brelse (struct ufs_buffer_head * ubh)
{
        unsigned i;
        if (!ubh)
                return;
        for (i = 0; i < ubh->count; i++)
                brelse (ubh->bh[i]);
        kfree (ubh);
}

void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
        unsigned i;
        if (!USPI_UBH(uspi))
                return;
        for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
                brelse (USPI_UBH(uspi)->bh[i]);
                USPI_UBH(uspi)->bh[i] = NULL;
        }
}

void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
        unsigned i;
        if (!ubh)
                return;
        for ( i = 0; i < ubh->count; i++ )
                mark_buffer_dirty (ubh->bh[i]);
}

void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
{
        unsigned i;
        if (!ubh)
                return;
        if (flag) {
                for ( i = 0; i < ubh->count; i++ )
                        set_buffer_uptodate (ubh->bh[i]);
        } else {
                for ( i = 0; i < ubh->count; i++ )
                        clear_buffer_uptodate (ubh->bh[i]);
        }
}

void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
{
        if (!ubh)
                return;

        ll_rw_block(rw, ubh->count, ubh->bh);
}

void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
{
        unsigned i;
        if (!ubh)
                return;
        for ( i = 0; i < ubh->count; i++ )
                wait_on_buffer (ubh->bh[i]);
}

void ubh_bforget (struct ufs_buffer_head * ubh)
{
        unsigned i;
        if (!ubh)
                return;
        for ( i = 0; i < ubh->count; i++ )
                if ( ubh->bh[i] )
                        bforget (ubh->bh[i]);
}

int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
        unsigned i;
        unsigned result = 0;
        if (!ubh)
                return 0;
        for ( i = 0; i < ubh->count; i++ )
                result |= buffer_dirty(ubh->bh[i]);
        return result;
}

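/*
 * Example (not part of the original file): a minimal sketch of the intended
 * read-modify-write pattern for the helpers above.  The fragment number is
 * made up for illustration, and uspi->s_bsize is assumed to hold the block
 * size in bytes.
 */
#if 0
static void ubh_usage_sketch(struct super_block *sb,
                             struct ufs_sb_private_info *uspi)
{
        struct ufs_buffer_head *ubh;

        /* Read one block's worth of fragments starting at fragment 0. */
        ubh = _ubh_bread_(uspi, sb, 0, uspi->s_bsize);
        if (!ubh)
                return;

        /* ... inspect or modify ubh->bh[i]->b_data here ... */

        ubh_mark_buffer_dirty(ubh);     /* schedule the fragments for writeback */
        ubh_brelse(ubh);                /* drop the references and free the descriptor */
}
#endif

/*
 * The next two helpers copy a region between a ufs_buffer_head and a flat
 * memory buffer one fragment at a time.  Note that @mem always advances by
 * a full fragment (s_fsize) while @size is decremented by the amount
 * actually copied.
 */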
void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
        unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
{
        unsigned len, bhno;
        if (size > (ubh->count << uspi->s_fshift))
                size = ubh->count << uspi->s_fshift;
        bhno = 0;
        while (size) {
                len = min_t(unsigned int, size, uspi->s_fsize);
                memcpy (mem, ubh->bh[bhno]->b_data, len);
                mem += uspi->s_fsize;
                size -= len;
                bhno++;
        }
}

void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
        struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
{
        unsigned len, bhno;
        if (size > (ubh->count << uspi->s_fshift))
                size = ubh->count << uspi->s_fshift;
        bhno = 0;
        while (size) {
                len = min_t(unsigned int, size, uspi->s_fsize);
                memcpy (ubh->bh[bhno]->b_data, mem, len);
                mem += uspi->s_fsize;
                size -= len;
                bhno++;
        }
}

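/*
 * Device numbers of special inodes are stored in i_data[1] on SUNx86 and in
 * i_data[0] everywhere else.  The Sun variants may hold either the old
 * 16-bit encoding or a 32-bit SysV encoding; all other variants use the old
 * encoding.  ufs_get_inode_dev() turns the on-disk value into a dev_t.
 */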
dev_t
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
{
        __u32 fs32;
        dev_t dev;

        if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
                fs32 = ufsi->i_u1.i_data[1];
        else
                fs32 = ufsi->i_u1.i_data[0];
        fs32 = fs32_to_cpu(sb, fs32);
        switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
        case UFS_ST_SUNx86:
        case UFS_ST_SUN:
                if ((fs32 & 0xffff0000) == 0 ||
                    (fs32 & 0xffff0000) == 0xffff0000)
                        dev = old_decode_dev(fs32 & 0x7fff);
                else
                        dev = MKDEV(sysv_major(fs32), sysv_minor(fs32));
                break;

        default:
                dev = old_decode_dev(fs32);
                break;
        }
        return dev;
}

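/*
 * ufs_set_inode_dev() is the inverse: it encodes @dev in the form the
 * flavour expects (falling back to the old encoding on Sun variants when
 * the SysV form fits in the low 15 bits) and stores it in the inode.
 */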
void
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
{
        __u32 fs32;

        switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
        case UFS_ST_SUNx86:
        case UFS_ST_SUN:
                fs32 = sysv_encode_dev(dev);
                if ((fs32 & 0xffff8000) == 0) {
                        fs32 = old_encode_dev(dev);
                }
                break;

        default:
                fs32 = old_encode_dev(dev);
                break;
        }
        fs32 = cpu_to_fs32(sb, fs32);
        if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
                ufsi->i_u1.i_data[1] = fs32;
        else
                ufsi->i_u1.i_data[0] = fs32;
}

/**
 * ufs_get_locked_page() - locate, pin and lock a pagecache page, reading it
 * in from disk if it is not already present.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Locates the desired pagecache page; if it is not present, reads it in.
 * Locks the page, increments its reference count and returns its address.
 */

struct page *ufs_get_locked_page(struct address_space *mapping,
                                 pgoff_t index)
{
        struct page *page;

        page = find_lock_page(mapping, index);
        if (!page) {
                page = read_cache_page(mapping, index,
                                       (filler_t*)mapping->a_ops->readpage,
                                       NULL);

                if (IS_ERR(page)) {
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "read_cache_page error: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);
                        goto out;
                }

                lock_page(page);

                if (unlikely(page->mapping == NULL)) {
                        /* Truncate got there first */
                        unlock_page(page);
                        page_cache_release(page);
                        page = NULL;
                        goto out;
                }

                if (!PageUptodate(page) || PageError(page)) {
                        unlock_page(page);
                        page_cache_release(page);

                        printk(KERN_ERR "ufs_change_blocknr: "
                               "can not read page: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);

                        page = ERR_PTR(-EIO);
                }
        }
out:
        return page;
}

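/*
 * Example (not part of the original file): a minimal sketch of how a caller
 * is expected to use ufs_get_locked_page().  On success the page comes back
 * locked and with an elevated reference count; ufs_put_locked_page() is
 * assumed to be the matching helper in util.h that unlocks and releases it.
 */
#if 0
static void get_locked_page_sketch(struct address_space *mapping, pgoff_t index)
{
        struct page *page = ufs_get_locked_page(mapping, index);

        if (page && !IS_ERR(page)) {
                /* ... inspect or modify the page contents here ... */
                ufs_put_locked_page(page);
        }
}
#endif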