* Initialises the fields of the file lock which are invariant for
* free file_locks.
*/
-static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
+static void init_once(struct kmem_cache *cache, void *foo)
{
struct file_lock *lock = (struct file_lock *) foo;
}
/* Allocate a file_lock initialised to this type of lease */
-static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
+static struct file_lock *lease_alloc(struct file *filp, int type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
if (fl == NULL)
- goto out;
+ return ERR_PTR(error);
error = lease_init(filp, type, fl);
if (error) {
locks_free_lock(fl);
- fl = NULL;
+ return ERR_PTR(error);
}
-out:
- *flp = fl;
- return error;
+ return fl;
}
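A minimal sketch of the new calling pattern (the __break_lease hunk below does the same dance): the errno now travels inside the returned pointer via the <linux/err.h> helpers, so a caller checks IS_ERR() instead of a separate error out-parameter:

	struct file_lock *fl = lease_alloc(filp, F_RDLCK);

	if (IS_ERR(fl))
		return PTR_ERR(fl);	/* e.g. -ENOMEM from locks_alloc_lock() */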
/* Check if two locks overlap each other.
static void locks_wake_up_blocks(struct file_lock *blocker)
{
while (!list_empty(&blocker->fl_block)) {
- struct file_lock *waiter = list_entry(blocker->fl_block.next,
+ struct file_lock *waiter;
+
+ waiter = list_first_entry(&blocker->fl_block,
struct file_lock, fl_block);
__locks_delete_block(waiter);
if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
return result;
}
-int
+void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
if (!IS_POSIX(cfl))
continue;
- if (posix_locks_conflict(cfl, fl))
+ if (posix_locks_conflict(fl, cfl))
break;
}
- if (cfl) {
+ if (cfl)
__locks_copy_lock(fl, cfl);
- unlock_kernel();
- return 1;
- } else
+ else
fl->fl_type = F_UNLCK;
unlock_kernel();
- return 0;
+ return;
}
EXPORT_SYMBOL(posix_test_lock);
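posix_test_lock() now reports its result entirely through *fl. A sketch of a caller along the lines of the F_GETLK path (the range set-up and report_conflict() are assumed for illustration):

	struct file_lock fl;

	locks_init_lock(&fl);
	fl.fl_type = F_RDLCK;
	/* fill in fl.fl_start, fl.fl_end, fl.fl_owner for the range to test */
	posix_test_lock(filp, &fl);
	if (fl.fl_type != F_UNLCK)
		report_conflict(&fl);	/* hypothetical: a conflicting lock was copied into fl */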
static int posix_locks_deadlock(struct file_lock *caller_fl,
struct file_lock *block_fl)
{
- struct list_head *tmp;
+ struct file_lock *fl;
next_task:
if (posix_same_owner(caller_fl, block_fl))
return 1;
- list_for_each(tmp, &blocked_list) {
- struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
+ list_for_each_entry(fl, &blocked_list, fl_link) {
if (posix_same_owner(fl, block_fl)) {
fl = fl->fl_next;
block_fl = fl;
}
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
- * at the head of the list, but that's secret knowledge known only to
- * flock_lock_file and posix_lock_file.
+ * after any leases, but before any posix locks.
*
* Note that if called with an FL_EXISTS argument, the caller may determine
* whether or not a lock was successfully freed by testing the return
lock_kernel();
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
+
+ if (request->fl_type != F_UNLCK) {
+ error = -ENOMEM;
+ new_fl = locks_alloc_lock();
+ if (new_fl == NULL)
+ goto out;
+ error = 0;
+ }
+
for_each_lock(inode, before) {
struct file_lock *fl = *before;
if (IS_POSIX(fl))
goto out;
}
- error = -ENOMEM;
- new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
/*
* If a higher-priority process was blocked on the old file lock,
* give it the opportunity to lock the file.
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
- locks_insert_lock(&inode->i_flock, new_fl);
+ locks_insert_lock(before, new_fl);
new_fl = NULL;
error = 0;
lock_kernel();
if (request->fl_type != F_UNLCK) {
for_each_lock(inode, before) {
- struct file_lock *fl = *before;
+ fl = *before;
if (!IS_POSIX(fl))
continue;
if (!posix_locks_conflict(request, fl))
* If we've been sleeping someone might have
* changed the permissions behind our back.
*/
- if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ if (__mandatory_lock(inode))
continue;
}
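__mandatory_lock() is taken here to be a new helper wrapping the mode test the old line open-coded; a sketch of the likely pair of helpers (mandatory_lock(), used in later hunks, adds the mount flag check):

static inline int __mandatory_lock(struct inode *ino)
{
	/* setgid set with group-execute clear marks mandatory locking */
	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
}

static inline int mandatory_lock(struct inode *ino)
{
	/* ... and the superblock must allow mandatory locks at all */
	return IS_MANDLOCK(ino) && __mandatory_lock(ino);
}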
* @inode: the inode of the file to return
* @mode: the open mode (read or write)
*
- * break_lease (inlined for speed) has checked there already
- * is a lease on this file. Leases are broken on a call to open()
- * or truncate(). This function can sleep unless you
+ * break_lease (inlined for speed) has checked there already is at least
+ * some kind of lock (maybe a lease) on this file. Leases are broken on
+ * a call to open() or truncate(). This function can sleep unless you
* specified %O_NONBLOCK to your open().
*/
int __break_lease(struct inode *inode, unsigned int mode)
int error = 0, future;
struct file_lock *new_fl, *flock;
struct file_lock *fl;
- int alloc_err;
unsigned long break_time;
int i_have_this_lease = 0;
- alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
- &new_fl);
+ new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
lock_kernel();
goto out;
}
- if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
- error = alloc_err;
+ if (IS_ERR(new_fl) && !i_have_this_lease
+ && ((mode & O_NONBLOCK) == 0)) {
+ error = PTR_ERR(new_fl);
goto out;
}
out:
unlock_kernel();
- if (!alloc_err)
+ if (!IS_ERR(new_fl))
locks_free_lock(new_fl);
return error;
}
}
/**
- * __setlease - sets a lease on an open file
+ * generic_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
*
* Called with kernel lock held.
*/
-static int __setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
+ struct file_lock *new_fl = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
+ if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+ return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+ error = security_file_lock(filp, arg);
+ if (error)
+ return error;
+
time_out_leases(inode);
- error = -EINVAL;
- if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
- goto out;
+ BUG_ON(!(*flp)->fl_lmops->fl_break);
lease = *flp;
|| (atomic_read(&inode->i_count) > 1)))
goto out;
+ error = -ENOMEM;
+ new_fl = locks_alloc_lock();
+ if (new_fl == NULL)
+ goto out;
+
/*
* At this point, we know that if there is an exclusive
* lease on this file, then we hold it on this filp
if (!leases_enable)
goto out;
- error = -ENOMEM;
- fl = locks_alloc_lock();
- if (fl == NULL)
- goto out;
+ locks_copy_lock(new_fl, lease);
+ locks_insert_lock(before, new_fl);
- locks_copy_lock(fl, lease);
-
- locks_insert_lock(before, fl);
+ *flp = new_fl;
+ return 0;
- *flp = fl;
- error = 0;
out:
+ if (new_fl != NULL)
+ locks_free_lock(new_fl);
return error;
}
+EXPORT_SYMBOL(generic_setlease);
/**
- * setlease - sets a lease on an open file
+ * vfs_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
* @lease: file_lock to use
*
* Call this to establish a lease on the file.
- * The fl_lmops fl_break function is required by break_lease
+ * The (*lease)->fl_lmops->fl_break operation must be set; if not,
+ * break_lease will oops!
+ *
+ * This will call the filesystem's setlease file method, if
+ * defined. Note that there is no getlease method; instead, the
+ * filesystem setlease method should call back to generic_setlease() to
+ * add a lease to the inode's lease list, where fcntl_getlease() can
+ * find it. Since fcntl_getlease() only reports whether the current
+ * task holds a lease, a cluster filesystem need only do this for
+ * leases held by processes on this node.
+ *
+ * There is also no break_lease method; filesystems that
+ * handle their own leases should break leases themselves from the
+ * filesystem's open, create, and (on truncate) setattr methods.
+ *
+ * Warning: the only current setlease methods exist solely to disable
+ * leases in certain cases. More vfs changes may be required to
+ * allow a full filesystem lease implementation.
*/
-int setlease(struct file *filp, long arg, struct file_lock **lease)
+int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
int error;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
-
lock_kernel();
- error = __setlease(filp, arg, lease);
+ if (filp->f_op && filp->f_op->setlease)
+ error = filp->f_op->setlease(filp, arg, lease);
+ else
+ error = generic_setlease(filp, arg, lease);
unlock_kernel();
return error;
}
-
-EXPORT_SYMBOL(setlease);
+EXPORT_SYMBOL_GPL(vfs_setlease);
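A sketch of the dispatch this enables (the filesystem name and remote check are hypothetical): a setlease method can refuse leases where they cannot work and delegate to generic_setlease() for the normal case:

static int examplefs_setlease(struct file *filp, long arg,
			      struct file_lock **flp)
{
	if (examplefs_file_is_remote(filp))	/* hypothetical predicate */
		return -EINVAL;
	return generic_setlease(filp, arg, flp);
}

static const struct file_operations examplefs_file_ops = {
	/* .read, .write, .open, ... */
	.setlease	= examplefs_setlease,
};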
/**
* fcntl_setlease - sets a lease on an open file
struct inode *inode = dentry->d_inode;
int error;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
-
locks_init_lock(&fl);
error = lease_init(filp, arg, &fl);
if (error)
lock_kernel();
- error = __setlease(filp, arg, &flp);
+ error = vfs_setlease(filp, arg, &flp);
if (error || arg == F_UNLCK)
goto out_unlock;
error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
if (error < 0) {
- /* remove lease just inserted by __setlease */
+ /* remove lease just inserted by vfs_setlease */
flp->fl_type = F_UNLCK | F_INPROGRESS;
- flp->fl_break_time = jiffies- 10;
+ flp->fl_break_time = jiffies - 10;
time_out_leases(inode);
goto out_unlock;
}
/**
* vfs_test_lock - test file byte range lock
* @filp: The file to test lock for
- * @fl: The lock to test
- * @conf: Place to return a copy of the conflicting lock, if found
+ * @fl: The lock to test; also used to hold result
*
* Returns -ERRNO on failure. Indicates presence of conflicting lock by
- * setting conf->fl_type to something other than F_UNLCK.
+ * setting fl->fl_type to something other than F_UNLCK.
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
- if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
- mapping_writably_mapped(filp->f_mapping)) {
+ if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
error = -EAGAIN;
goto out;
}
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
- if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
- mapping_writably_mapped(filp->f_mapping)) {
+ if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
error = -EAGAIN;
goto out;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
-static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
+#ifdef CONFIG_PROC_FS
+#include <linux/seq_file.h>
+
+static void lock_get_status(struct seq_file *f, struct file_lock *fl,
+ int id, char *pfx)
{
struct inode *inode = NULL;
if (fl->fl_file != NULL)
inode = fl->fl_file->f_path.dentry->d_inode;
- out += sprintf(out, "%d:%s ", id, pfx);
+ seq_printf(f, "%d:%s ", id, pfx);
if (IS_POSIX(fl)) {
- out += sprintf(out, "%6s %s ",
+ seq_printf(f, "%6s %s ",
(fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
(inode == NULL) ? "*NOINODE*" :
- (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
- "MANDATORY" : "ADVISORY ");
+ mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
} else if (IS_FLOCK(fl)) {
if (fl->fl_type & LOCK_MAND) {
- out += sprintf(out, "FLOCK MSNFS ");
+ seq_printf(f, "FLOCK MSNFS ");
} else {
- out += sprintf(out, "FLOCK ADVISORY ");
+ seq_printf(f, "FLOCK ADVISORY ");
}
} else if (IS_LEASE(fl)) {
- out += sprintf(out, "LEASE ");
+ seq_printf(f, "LEASE ");
if (fl->fl_type & F_INPROGRESS)
- out += sprintf(out, "BREAKING ");
+ seq_printf(f, "BREAKING ");
else if (fl->fl_file)
- out += sprintf(out, "ACTIVE ");
+ seq_printf(f, "ACTIVE ");
else
- out += sprintf(out, "BREAKER ");
+ seq_printf(f, "BREAKER ");
} else {
- out += sprintf(out, "UNKNOWN UNKNOWN ");
+ seq_printf(f, "UNKNOWN UNKNOWN ");
}
if (fl->fl_type & LOCK_MAND) {
- out += sprintf(out, "%s ",
+ seq_printf(f, "%s ",
(fl->fl_type & LOCK_READ)
? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
- out += sprintf(out, "%s ",
+ seq_printf(f, "%s ",
(fl->fl_type & F_INPROGRESS)
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
- out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %s:%ld ", fl->fl_pid,
inode->i_sb->s_id, inode->i_ino);
#else
/* userspace relies on this representation of dev_t ;-( */
- out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
} else {
- out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
+ seq_printf(f, "%d <none>:0 ", fl->fl_pid);
}
if (IS_POSIX(fl)) {
if (fl->fl_end == OFFSET_MAX)
- out += sprintf(out, "%Ld EOF\n", fl->fl_start);
+ seq_printf(f, "%Ld EOF\n", fl->fl_start);
else
- out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
- fl->fl_end);
+ seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
} else {
- out += sprintf(out, "0 EOF\n");
+ seq_printf(f, "0 EOF\n");
}
}
-static void move_lock_status(char **p, off_t* pos, off_t offset)
+static int locks_show(struct seq_file *f, void *v)
{
- int len;
- len = strlen(*p);
- if(*pos >= offset) {
- /* the complete line is valid */
- *p += len;
- *pos += len;
- return;
- }
- if(*pos+len > offset) {
- /* use the second part of the line */
- int i = offset-*pos;
- memmove(*p,*p+i,len-i);
- *p += len-i;
- *pos += len;
- return;
- }
- /* discard the complete line */
- *pos += len;
+ struct file_lock *fl, *bfl;
+
+ fl = list_entry(v, struct file_lock, fl_link);
+
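+	/* f->private is used as an integer line counter rather than a
+	 * pointer; locks_start() below seeds it to (void *)1. */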
+ lock_get_status(f, fl, (long)f->private, "");
+
+ list_for_each_entry(bfl, &fl->fl_block, fl_block)
+ lock_get_status(f, bfl, (long)f->private, " ->");
+
+ f->private++;
+ return 0;
}
-/**
- * get_locks_status - reports lock usage in /proc/locks
- * @buffer: address in userspace to write into
- * @start: ?
- * @offset: how far we are through the buffer
- * @length: how much to read
- */
+static void *locks_start(struct seq_file *f, loff_t *pos)
+{
+ lock_kernel();
+ f->private = (void *)1;
+ return seq_list_start(&file_lock_list, *pos);
+}
-int get_locks_status(char *buffer, char **start, off_t offset, int length)
+static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
- struct list_head *tmp;
- char *q = buffer;
- off_t pos = 0;
- int i = 0;
+ return seq_list_next(v, &file_lock_list, pos);
+}
- lock_kernel();
- list_for_each(tmp, &file_lock_list) {
- struct list_head *btmp;
- struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
- lock_get_status(q, fl, ++i, "");
- move_lock_status(&q, &pos, offset);
-
- if(pos >= offset+length)
- goto done;
-
- list_for_each(btmp, &fl->fl_block) {
- struct file_lock *bfl = list_entry(btmp,
- struct file_lock, fl_block);
- lock_get_status(q, bfl, i, " ->");
- move_lock_status(&q, &pos, offset);
-
- if(pos >= offset+length)
- goto done;
- }
- }
-done:
+static void locks_stop(struct seq_file *f, void *v)
+{
unlock_kernel();
- *start = buffer;
- if(q-buffer < length)
- return (q-buffer);
- return length;
}
+struct seq_operations locks_seq_operations = {
+ .start = locks_start,
+ .next = locks_next,
+ .stop = locks_stop,
+ .show = locks_show,
+};
+#endif
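The consumer of locks_seq_operations lives outside this file; the /proc hookup is standard seq_file boilerplate and presumably looks something like this (names and location assumed):

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &locks_seq_operations);
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * ... registered at init time with create_proc_entry("locks", 0, NULL)
 * and proc_fops pointed at proc_locks_operations.
 */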
+
/**
* lock_may_read - checks that the region is free of locks
* @inode: the inode that is being read
{
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC,
- init_once, NULL);
+ init_once);
return 0;
}