/*
 *  linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */
#include "ext2.h"
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
typedef struct ext2_dir_entry_2 ext2_dirent;
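/*
 * For reference (struct ext2_dir_entry_2 lives in the ext2 headers, not in
 * this file): each on-disk entry is a 32-bit inode number, a 16-bit rec_len
 * giving the distance to the next entry, an 8-bit name_len, an 8-bit
 * file_type, and the name itself, padded out to a 4-byte boundary.
 */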
/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
        return inode->i_sb->s_blocksize;
}
static inline void ext2_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);
}
static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
        unsigned last_byte = inode->i_size;

        last_byte -= page_nr << PAGE_CACHE_SHIFT;
        if (last_byte > PAGE_CACHE_SIZE)
                last_byte = PAGE_CACHE_SIZE;
        return last_byte;
}
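/*
 * Example (assuming 4096-byte pages): for a directory with i_size == 5120,
 * ext2_last_byte() returns 4096 for page 0 and 1024 for page 1, so scans
 * never walk past the valid portion of the final page.
 */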
static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
{
        struct inode *dir = page->mapping->host;
        int err = 0;

        dir->i_version++;
        page->mapping->a_ops->commit_write(NULL, page, from, to);
        if (IS_DIRSYNC(dir))
                err = write_one_page(page, 1);
        else
                unlock_page(page);
        return err;
}
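/*
 * Note: every in-place directory update in this file is bracketed by
 * prepare_write() on the page and ext2_commit_chunk(); for directories that
 * require synchronous updates (IS_DIRSYNC) the page is written back
 * immediately via write_one_page(), otherwise it is just unlocked and left
 * dirty.
 */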
static void ext2_check_page(struct page *page)
{
        struct inode *dir = page->mapping->host;
        struct super_block *sb = dir->i_sb;
        unsigned chunk_size = ext2_chunk_size(dir);
        char *kaddr = page_address(page);
        u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
        unsigned offs, rec_len;
        unsigned limit = PAGE_CACHE_SIZE;
        ext2_dirent *p;
        char *error;

        if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
                limit = dir->i_size & ~PAGE_CACHE_MASK;
                if (limit & (chunk_size - 1))
                        goto Ebadsize;
                if (!limit)
                        goto out;
        }
        for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
                p = (ext2_dirent *)(kaddr + offs);
                rec_len = le16_to_cpu(p->rec_len);

                if (rec_len < EXT2_DIR_REC_LEN(1))
                        goto Eshort;
                if (rec_len & 3)
                        goto Ealign;
                if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
                        goto Enamelen;
                if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
                        goto Espan;
                if (le32_to_cpu(p->inode) > max_inumber)
                        goto Einumber;
        }
        if (offs != limit)
                goto Eend;
out:
        SetPageChecked(page);
        return;

        /* Too bad, we had an error */

Ebadsize:
        ext2_error(sb, "ext2_check_page",
                "size of directory #%lu is not a multiple of chunk size",
                dir->i_ino);
        goto fail;
Eshort:
        error = "rec_len is smaller than minimal";
        goto bad_entry;
Ealign:
        error = "unaligned directory entry";
        goto bad_entry;
Enamelen:
        error = "rec_len is too small for name_len";
        goto bad_entry;
Espan:
        error = "directory entry across blocks";
        goto bad_entry;
Einumber:
        error = "inode out of bounds";
bad_entry:
        ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - "
                "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
                dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
                (unsigned long) le32_to_cpu(p->inode),
                rec_len, p->name_len);
        goto fail;
Eend:
        p = (ext2_dirent *)(kaddr + offs);
        ext2_error (sb, "ext2_check_page",
                "entry in directory #%lu spans the page boundary"
                "offset=%lu, inode=%lu",
                dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
                (unsigned long) le32_to_cpu(p->inode));
fail:
        SetPageChecked(page);
        SetPageError(page);
}
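/*
 * Summary of the invariants enforced above: every rec_len is at least
 * EXT2_DIR_REC_LEN(1), 4-byte aligned, big enough for its own name, and
 * never crosses a chunk (block) boundary, and no inode number exceeds
 * s_inodes_count.  A page that passes is marked PageChecked so the scan is
 * done at most once; a page that fails is marked PageError as well.
 */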
static struct page * ext2_get_page(struct inode *dir, unsigned long n)
{
        struct address_space *mapping = dir->i_mapping;
        struct page *page = read_mapping_page(mapping, n, NULL);
        if (!IS_ERR(page)) {
                wait_on_page_locked(page);
                kmap(page);
                if (!PageUptodate(page))
                        goto fail;
                if (!PageChecked(page))
                        ext2_check_page(page);
                if (PageError(page))
                        goto fail;
        }
        return page;

fail:
        ext2_put_page(page);
        return ERR_PTR(-EIO);
}
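/*
 * Note: ext2_get_page() returns the page kmap()ed on success; every caller
 * balances it with ext2_put_page(), which kunmap()s and drops the page
 * reference.
 */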
/*
 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
                                        struct ext2_dir_entry_2 * de)
{
        if (len != de->name_len)
                return 0;
        if (!de->inode)
                return 0;
        return !memcmp(name, de->name, len);
}
/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
        return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
}
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
        ext2_dirent *de = (ext2_dirent*)(base + offset);
        ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
        while ((char*)p < (char*)de) {
                if (p->rec_len == 0)
                        break;
                p = ext2_next_entry(p);
        }
        return (char *)p - base;
}
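/*
 * ext2_validate_entry() is used by readdir when the directory has changed
 * under us (i_version mismatch): it walks forward from the start of the
 * chunk and returns the offset of the first entry at or beyond the old
 * position, so f_pos always ends up on an entry boundary again.
 */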
static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
        [EXT2_FT_UNKNOWN]       = DT_UNKNOWN,
        [EXT2_FT_REG_FILE]      = DT_REG,
        [EXT2_FT_DIR]           = DT_DIR,
        [EXT2_FT_CHRDEV]        = DT_CHR,
        [EXT2_FT_BLKDEV]        = DT_BLK,
        [EXT2_FT_FIFO]          = DT_FIFO,
        [EXT2_FT_SOCK]          = DT_SOCK,
        [EXT2_FT_SYMLINK]       = DT_LNK,
};
#define S_SHIFT 12
static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = EXT2_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = EXT2_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = EXT2_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = EXT2_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = EXT2_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = EXT2_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = EXT2_FT_SYMLINK,
};
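/*
 * The two tables above translate in opposite directions: ext2_filetype_table
 * maps the on-disk EXT2_FT_* byte to the DT_* value handed back by readdir,
 * while ext2_type_by_mode maps the S_IFMT bits of i_mode (shifted down by
 * S_SHIFT) to the EXT2_FT_* value stored on disk.
 */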
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
        mode_t mode = inode->i_mode;
        if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
                de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
        else
                de->file_type = 0;
}
static int
ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
        loff_t pos = filp->f_pos;
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
        unsigned char *types = NULL;
        int need_revalidate = filp->f_version != inode->i_version;

        if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
                return 0;

        if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
                types = ext2_filetype_table;

        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
                ext2_dirent *de;
                struct page *page = ext2_get_page(inode, n);

                if (IS_ERR(page)) {
                        ext2_error(sb, __FUNCTION__,
                                   "bad page in #%lu",
                                   inode->i_ino);
                        filp->f_pos += PAGE_CACHE_SIZE - offset;
                        return -EIO;
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ext2_validate_entry(kaddr, offset, chunk_mask);
                                filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
                        }
                        filp->f_version = inode->i_version;
                        need_revalidate = 0;
                }
                de = (ext2_dirent *)(kaddr+offset);
                limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
                for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
                        if (de->rec_len == 0) {
                                ext2_error(sb, __FUNCTION__,
                                        "zero-length directory entry");
                                ext2_put_page(page);
                                return -EIO;
                        }
                        if (de->inode) {
                                int over;
                                unsigned char d_type = DT_UNKNOWN;

                                if (types && de->file_type < EXT2_FT_MAX)
                                        d_type = types[de->file_type];

                                offset = (char *)de - kaddr;
                                over = filldir(dirent, de->name, de->name_len,
                                                (n<<PAGE_CACHE_SHIFT) | offset,
                                                le32_to_cpu(de->inode), d_type);
                                if (over) {
                                        ext2_put_page(page);
                                        return 0;
                                }
                        }
                        filp->f_pos += le16_to_cpu(de->rec_len);
                }
                ext2_put_page(page);
        }
        return 0;
}
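/*
 * f_pos for an ext2 directory is a byte offset into the directory: the page
 * number in the high bits and the offset within that page in the low bits
 * ((n << PAGE_CACHE_SHIFT) | offset).  The f_version/i_version comparison is
 * what lets a concurrent directory modification be detected so f_pos can be
 * re-validated before the scan continues.
 */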
/*
 *      ext2_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
                        struct dentry *dentry, struct page ** res_page)
{
        const char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned reclen = EXT2_DIR_REC_LEN(namelen);
        unsigned long start, n;
        unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        struct ext2_inode_info *ei = EXT2_I(dir);
        ext2_dirent * de;

        if (npages == 0)
                goto out;

        /* OFFSET_CACHE */
        *res_page = NULL;

        start = ei->i_dir_start_lookup;
        if (start >= npages)
                start = 0;
        n = start;
        do {
                char *kaddr;
                page = ext2_get_page(dir, n);
                if (!IS_ERR(page)) {
                        kaddr = page_address(page);
                        de = (ext2_dirent *) kaddr;
                        kaddr += ext2_last_byte(dir, n) - reclen;
                        while ((char *) de <= kaddr) {
                                if (de->rec_len == 0) {
                                        ext2_error(dir->i_sb, __FUNCTION__,
                                                "zero-length directory entry");
                                        ext2_put_page(page);
                                        goto out;
                                }
                                if (ext2_match (namelen, name, de))
                                        goto found;
                                de = ext2_next_entry(de);
                        }
                        ext2_put_page(page);
                }
                if (++n >= npages)
                        n = 0;
                /* next page is past the blocks we've got */
                if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
                        ext2_error(dir->i_sb, __FUNCTION__,
                                "dir %lu size %lld exceeds block count %llu",
                                dir->i_ino, dir->i_size,
                                (unsigned long long)dir->i_blocks);
                        goto out;
                }
        } while (n != start);
out:
        return NULL;

found:
        *res_page = page;
        ei->i_dir_start_lookup = n;
        return de;
}
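/*
 * i_dir_start_lookup caches the page where the last successful lookup found
 * its entry, so a burst of lookups in the same large directory starts
 * scanning near the previous hit instead of always from page 0; the do/while
 * above wraps around so the whole directory is still covered.
 */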
struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
{
        struct page *page = ext2_get_page(dir, 0);
        ext2_dirent *de = NULL;

        if (!IS_ERR(page)) {
                de = ext2_next_entry((ext2_dirent *) page_address(page));
                *p = page;
        }
        return de;
}
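/*
 * ext2_dotdot() relies on ext2_make_empty() having laid out "." as the first
 * entry of block 0 with ".." immediately after it, so a single
 * ext2_next_entry() step from the start of the page lands on "..".
 */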
ino_t ext2_inode_by_name(struct inode * dir, struct dentry *dentry)
{
        ino_t res = 0;
        struct ext2_dir_entry_2 * de;
        struct page *page;

        de = ext2_find_entry (dir, dentry, &page);
        if (de) {
                res = le32_to_cpu(de->inode);
                ext2_put_page(page);
        }
        return res;
}
/* Releases the page */
void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
                        struct page *page, struct inode *inode)
{
        unsigned from = (char *) de - (char *) page_address(page);
        unsigned to = from + le16_to_cpu(de->rec_len);
        int err;

        lock_page(page);
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        BUG_ON(err);
        de->inode = cpu_to_le32(inode->i_ino);
        ext2_set_de_type (de, inode);
        err = ext2_commit_chunk(page, from, to);
        ext2_put_page(page);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
        mark_inode_dirty(dir);
}
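/*
 * Clearing EXT2_BTREE_FL here (and in ext2_add_link()/ext2_delete_entry())
 * drops any directory-index flag once the directory is modified: ext2 itself
 * keeps directories strictly linear, so a stale index hint left behind by
 * other tools must not survive an update made by this code.
 */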
/*
 *      Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = dentry->d_parent->d_inode;
        const char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned chunk_size = ext2_chunk_size(dir);
        unsigned reclen = EXT2_DIR_REC_LEN(namelen);
        unsigned short rec_len, name_len;
        struct page *page = NULL;
        ext2_dirent * de;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr;
        unsigned from, to;
        int err;

        /*
         * We take care of directory expansion in the same loop.
         * This code plays outside i_size, so it locks the page
         * to protect that region.
         */
        for (n = 0; n <= npages; n++) {
                char *dir_end;

                page = ext2_get_page(dir, n);
                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto out;
                lock_page(page);
                kaddr = page_address(page);
                dir_end = kaddr + ext2_last_byte(dir, n);
                de = (ext2_dirent *)kaddr;
                kaddr += PAGE_CACHE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
                                name_len = 0;
                                rec_len = chunk_size;
                                de->rec_len = cpu_to_le16(chunk_size);
                                de->inode = 0;
                                goto got_it;
                        }
                        if (de->rec_len == 0) {
                                ext2_error(dir->i_sb, __FUNCTION__,
                                        "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
                        }
                        err = -EEXIST;
                        if (ext2_match (namelen, name, de))
                                goto out_unlock;
                        name_len = EXT2_DIR_REC_LEN(de->name_len);
                        rec_len = le16_to_cpu(de->rec_len);
                        if (!de->inode && rec_len >= reclen)
                                goto got_it;
                        if (rec_len >= name_len + reclen)
                                goto got_it;
                        de = (ext2_dirent *) ((char *) de + rec_len);
                }
                unlock_page(page);
                ext2_put_page(page);
        }
        BUG();
        return -EINVAL;

got_it:
        from = (char*)de - (char*)page_address(page);
        to = from + rec_len;
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err)
                goto out_unlock;
        if (de->inode) {
                ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
                de1->rec_len = cpu_to_le16(rec_len - name_len);
                de->rec_len = cpu_to_le16(name_len);
        }
        de->name_len = namelen;
        memcpy (de->name, name, namelen);
        de->inode = cpu_to_le32(inode->i_ino);
        ext2_set_de_type (de, inode);
        err = ext2_commit_chunk(page, from, to);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
        mark_inode_dirty(dir);
        /* OFFSET_CACHE */
out_put:
        ext2_put_page(page);
out:
        return err;
out_unlock:
        unlock_page(page);
        goto out_put;
}
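/*
 * Insertion works by carving space out of an existing record: a live entry
 * is shrunk to EXT2_DIR_REC_LEN(name_len) and the remainder of its old
 * rec_len becomes the new entry, while an unused record (inode == 0) or the
 * fresh chunk appended beyond i_size is taken over whole.
 */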
/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        char *kaddr = page_address(page);
        unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
        unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
        ext2_dirent * pde = NULL;
        ext2_dirent * de = (ext2_dirent *) (kaddr + from);
        int err;

        while ((char*)de < (char*)dir) {
                if (de->rec_len == 0) {
                        ext2_error(inode->i_sb, __FUNCTION__,
                                "zero-length directory entry");
                        err = -EIO;
                        goto out;
                }
                pde = de;
                de = ext2_next_entry(de);
        }
        if (pde)
                from = (char*)pde - (char*)page_address(page);
        lock_page(page);
        err = mapping->a_ops->prepare_write(NULL, page, from, to);
        BUG_ON(err);
        if (pde)
                pde->rec_len = cpu_to_le16(to-from);
        dir->inode = 0;
        err = ext2_commit_chunk(page, from, to);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
        EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
        mark_inode_dirty(inode);
out:
        ext2_put_page(page);
        return err;
}
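/*
 * Example: with entries A and B adjacent in one chunk, deleting B extends
 * A's rec_len by B's rec_len, so a later scan steps from A straight to
 * whatever followed B; B's bytes stay on disk but become unreachable.  If
 * the victim is the first entry of its chunk there is no previous entry to
 * grow, so only its inode field is zeroed.
 */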
/*
 * Set the first fragment of directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        unsigned chunk_size = ext2_chunk_size(inode);
        struct ext2_dir_entry_2 * de;
        char *kaddr;
        int err;

        if (!page)
                return -ENOMEM;
        err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
        if (err) {
                unlock_page(page);
                goto fail;
        }
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, chunk_size);
        de = (struct ext2_dir_entry_2 *)kaddr;
        de->name_len = 1;
        de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
        memcpy (de->name, ".\0\0", 4);
        de->inode = cpu_to_le32(inode->i_ino);
        ext2_set_de_type (de, inode);

        de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
        de->name_len = 2;
        de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
        de->inode = cpu_to_le32(parent->i_ino);
        memcpy (de->name, "..\0", 4);
        ext2_set_de_type (de, inode);
        kunmap_atomic(kaddr, KM_USER0);
        err = ext2_commit_chunk(page, 0, chunk_size);
fail:
        page_cache_release(page);
        return err;
}
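/*
 * Worked example: EXT2_DIR_REC_LEN(len) rounds (8 + len) up to a multiple of
 * 4, so "." occupies EXT2_DIR_REC_LEN(1) == 12 bytes and ".." gets
 * rec_len == chunk_size - 12 (1012 for a 1 KiB block), covering the rest of
 * the chunk until later entries are carved out of it by ext2_add_link().
 */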
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir (struct inode * inode)
{
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);

        for (i = 0; i < npages; i++) {
                char *kaddr;
                ext2_dirent * de;
                page = ext2_get_page(inode, i);

                if (IS_ERR(page))
                        continue;

                kaddr = page_address(page);
                de = (ext2_dirent *)kaddr;
                kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

                while ((char *)de <= kaddr) {
                        if (de->rec_len == 0) {
                                ext2_error(inode->i_sb, __FUNCTION__,
                                        "zero-length directory entry");
                                printk("kaddr=%p, de=%p\n", kaddr, de);
                                goto not_empty;
                        }
                        if (de->inode != 0) {
                                /* check for . and .. */
                                if (de->name[0] != '.')
                                        goto not_empty;
                                if (de->name_len > 2)
                                        goto not_empty;
                                if (de->name_len < 2) {
                                        if (de->inode !=
                                            cpu_to_le32(inode->i_ino))
                                                goto not_empty;
                                } else if (de->name[1] != '.')
                                        goto not_empty;
                        }
                        de = ext2_next_entry(de);
                }
                ext2_put_page(page);
        }
        return 1;

not_empty:
        ext2_put_page(page);
        return 0;
}
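/*
 * Note the return convention: ext2_empty_dir() returns 1 when the directory
 * contains nothing beyond "." and "..", and 0 otherwise (including when a
 * corrupt entry is found), so rmdir refuses anything it cannot prove empty.
 */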
const struct file_operations ext2_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = ext2_readdir,
        .ioctl          = ext2_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
#endif
        .fsync          = ext2_sync_file,
};