* orig_start..fdt->next_fd
*/
start = orig_start;
- if (start < fdt->next_fd)
- start = fdt->next_fd;
+ if (start < files->next_fd)
+ start = files->next_fd;
newfd = start;
- if (start < fdt->max_fdset) {
+ if (start < fdt->max_fds)
newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
- fdt->max_fdset, start);
- }
+ fdt->max_fds, start);
error = -EMFILE;
if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
	goto out;
* we reacquire the fdtable pointer and use it while holding
* the lock, no one can free it during that time.
*/
- fdt = files_fdtable(files);
- if (start <= fdt->next_fd)
- fdt->next_fd = newfd + 1;
+ if (start <= files->next_fd)
+ files->next_fd = newfd + 1;
error = newfd;
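
For context, here is a simplified sketch of the two structures after this
change (an illustration only; unrelated fields are omitted). The point of
moving next_fd is that struct files_struct is per-process and never
reallocated, while the fdtable it points to is swapped out under RCU
whenever the table grows, so the allocation hint no longer has to be
migrated on every resize:

struct fdtable {
	unsigned int max_fds;		/* size of the fd array and bitmaps */
	struct file **fd;		/* current fd array */
	fd_set *close_on_exec;
	fd_set *open_fds;		/* bitmap searched above */
	struct rcu_head rcu;		/* deferred freeing on resize */
};

struct files_struct {
	atomic_t count;
	struct fdtable *fdt;		/* RCU-swapped on expansion */
	spinlock_t file_lock;
	int next_fd;			/* allocation hint, now lives here */
};
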
static int setfl(int fd, struct file * filp, unsigned long arg)
{
- struct inode * inode = filp->f_dentry->d_inode;
+ struct inode * inode = filp->f_path.dentry->d_inode;
int error = 0;
- /* O_APPEND cannot be cleared if the file is marked as append-only */
- if (!(arg & O_APPEND) && IS_APPEND(inode))
+ /*
+ * O_APPEND cannot be cleared if the file is marked as append-only
+ * and the file is open for write.
+ */
+ if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
return -EPERM;
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
/* required for strict SunOS emulation */
return error;
}
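
The rewritten check is stricter than the old one: XOR-ing the requested
flags with the current ones isolates the bits the caller is trying to
change, so an append-only inode now refuses any toggling of O_APPEND
(setting or clearing), not just clearing it. A small userspace-style
illustration of the bit trick (hypothetical values, just to show the
logic):

#include <assert.h>

#define O_APPEND 02000	/* typical value; illustrative only */

int main(void)
{
	unsigned long f_flags = O_APPEND;	/* file currently append-only */

	/* Trying to clear O_APPEND: XOR exposes the changed bit. */
	assert(((0UL ^ f_flags) & O_APPEND) != 0);

	/* Leaving O_APPEND untouched: no changed bit, check passes. */
	assert(((O_APPEND ^ f_flags) & O_APPEND) == 0);
	return 0;
}
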
-static void f_modown(struct file *filp, unsigned long pid,
+static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
uid_t uid, uid_t euid, int force)
{
write_lock_irq(&filp->f_owner.lock);
if (force || !filp->f_owner.pid) {
- filp->f_owner.pid = pid;
+ put_pid(filp->f_owner.pid);
+ filp->f_owner.pid = get_pid(pid);
+ filp->f_owner.pid_type = type;
filp->f_owner.uid = uid;
filp->f_owner.euid = euid;
}
write_unlock_irq(&filp->f_owner.lock);
}
-int f_setown(struct file *filp, unsigned long arg, int force)
+int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+ int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;
- f_modown(filp, arg, current->uid, current->euid, force);
+ f_modown(filp, pid, type, current->uid, current->euid, force);
return 0;
}
+EXPORT_SYMBOL(__f_setown);
+int f_setown(struct file *filp, unsigned long arg, int force)
+{
+ enum pid_type type;
+ struct pid *pid;
+ int who = arg;
+ int result;
+ type = PIDTYPE_PID;
+ if (who < 0) {
+ type = PIDTYPE_PGID;
+ who = -who;
+ }
+ rcu_read_lock();
+ pid = find_pid(who);
+ result = __f_setown(filp, pid, type, force);
+ rcu_read_unlock();
+ return result;
+}
EXPORT_SYMBOL(f_setown);
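
Two details of the new ownership plumbing are worth spelling out: f_modown
now takes its own reference with get_pid() and drops the previous owner's
with put_pid(), so a recycled pid number can never be signalled by mistake;
and f_setown does the lookup under rcu_read_lock() because find_pid()
returns a pointer without taking a reference, valid only inside the RCU
read section. A minimal sketch of that lookup pattern, assuming the
find_pid() API of this era (grab_pid is a hypothetical helper):

struct pid *grab_pid(int nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = find_pid(nr);	/* unreferenced; stable only under RCU */
	if (pid)
		get_pid(pid);	/* pin it before leaving the read section */
	rcu_read_unlock();

	return pid;		/* caller must eventually put_pid() */
}
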
void f_delown(struct file *filp)
{
- f_modown(filp, 0, 0, 0, 1);
+ f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
+}
+
+pid_t f_getown(struct file *filp)
+{
+ pid_t pid;
+ read_lock(&filp->f_owner.lock);
+ pid = pid_nr(filp->f_owner.pid);
+ if (filp->f_owner.pid_type == PIDTYPE_PGID)
+ pid = -pid;
+ read_unlock(&filp->f_owner.lock);
+ return pid;
}
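
f_getown() reconstructs the classic wire format of F_GETOWN: a positive
value names a single owning process, a negative one a process group. From
userspace the round-trip looks like this (illustrative snippet using the
standard fcntl(2) interface):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* A negative F_SETOWN argument means "this process group". */
	if (fcntl(STDIN_FILENO, F_SETOWN, -getpgrp()) == -1)
		perror("F_SETOWN");

	/* F_GETOWN then reports the group as a negative number. */
	printf("owner = %d\n", fcntl(STDIN_FILENO, F_GETOWN));
	return 0;
}
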
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
* current syscall conventions, the only way
* to fix this will be in libc.
*/
- err = filp->f_owner.pid;
+ err = f_getown(filp);
force_successful_syscall_return();
break;
case F_SETOWN:
/* Table to convert sigio signal codes into poll band bitmaps */
-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
/* Make sure we are called with one of the POLL_*
reasons, otherwise we could leak kernel stack into
userspace. */
- if ((reason & __SI_MASK) != __SI_POLL)
- BUG();
+ BUG_ON((reason & __SI_MASK) != __SI_POLL);
if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L;
else
void send_sigio(struct fown_struct *fown, int fd, int band)
{
struct task_struct *p;
- int pid;
+ enum pid_type type;
+ struct pid *pid;
read_lock(&fown->lock);
+ type = fown->pid_type;
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
read_lock(&tasklist_lock);
- if (pid > 0) {
- p = find_task_by_pid(pid);
- if (p) {
- send_sigio_to_task(p, fown, fd, band);
- }
- } else {
- do_each_task_pid(-pid, PIDTYPE_PGID, p) {
- send_sigio_to_task(p, fown, fd, band);
- } while_each_task_pid(-pid, PIDTYPE_PGID, p);
- }
+ do_each_pid_task(pid, type, p) {
+ send_sigio_to_task(p, fown, fd, band);
+ } while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
read_unlock(&fown->lock);
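
The reason a single iterator replaces the old pid > 0 / pid < 0 branches is
the shape of struct pid itself: it keeps one task list per pid type, and
the PIDTYPE_PID list holds at most one task while the PIDTYPE_PGID list
links every member of the group. A simplified sketch of the structure in
this era (fields trimmed for illustration):

struct pid {
	atomic_t count;			/* refcount: get_pid()/put_pid() */
	int nr;				/* the numeric pid */
	struct hlist_node pid_chain;	/* hash-table linkage for find_pid() */
	struct hlist_head tasks[PIDTYPE_MAX];	/* attached tasks, per type */
	struct rcu_head rcu;		/* deferred free */
};
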
int send_sigurg(struct fown_struct *fown)
{
struct task_struct *p;
- int pid, ret = 0;
+ enum pid_type type;
+ struct pid *pid;
+ int ret = 0;
read_lock(&fown->lock);
+ type = fown->pid_type;
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
ret = 1;
read_lock(&tasklist_lock);
- if (pid > 0) {
- p = find_task_by_pid(pid);
- if (p) {
- send_sigurg_to_task(p, fown);
- }
- } else {
- do_each_task_pid(-pid, PIDTYPE_PGID, p) {
- send_sigurg_to_task(p, fown);
- } while_each_task_pid(-pid, PIDTYPE_PGID, p);
- }
+ do_each_pid_task(pid, type, p) {
+ send_sigurg_to_task(p, fown);
+ } while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
read_unlock(&fown->lock);
}
static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static struct kmem_cache *fasync_cache __read_mostly;
/*
* fasync_helper() is used by some character device drivers (mainly mice)
int result = 0;
if (on) {
- new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
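
For completeness, the cache allocated from here is created once at boot. A
sketch of that initializer as it looks in this era, when
kmem_cache_create() still took constructor and destructor arguments
(dropped in later kernels); the function name follows fs/fcntl.c:

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)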