* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
-#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
+#include <linux/getcpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b) (-EINVAL)
#endif
+#ifndef GET_ENDIAN
+# define GET_ENDIAN(a,b) (-EINVAL)
+#endif
+#ifndef SET_ENDIAN
+# define SET_ENDIAN(a,b) (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
*/
int C_A_D = 1;
-int cad_pid = 1;
+struct pid *cad_pid;
+EXPORT_SYMBOL(cad_pid);
/*
* Notifier list for kernel code which wants to be called
* and the like.
*/
-static struct notifier_block *reboot_notifier_list;
-static DEFINE_RWLOCK(notifier_lock);
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines. The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if (n->priority > (*nl)->priority)
+ break;
+ nl = &((*nl)->next);
+ }
+ n->next = *nl;
+ rcu_assign_pointer(*nl, n);
+ return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if ((*nl) == n) {
+ rcu_assign_pointer(*nl, n->next);
+ return 0;
+ }
+ nl = &((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+ unsigned long val, void *v)
+{
+ int ret = NOTIFY_DONE;
+ struct notifier_block *nb, *next_nb;
+
+ nb = rcu_dereference(*nl);
+ while (nb) {
+ next_nb = rcu_dereference(nb->next);
+ ret = nb->notifier_call(nb, val, v);
+ if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ break;
+ nb = next_nb;
+ }
+ return ret;
+}
+
+/*
+ * Atomic notifier chain routines. Registration and unregistration
+ * use a spinlock, and call_chain is synchronized by RCU (no locks).
+ */
/**
- * notifier_chain_register - Add notifier to a notifier chain
- * @list: Pointer to root list pointer
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
* @n: New entry in notifier chain
*
- * Adds a notifier to a notifier chain.
+ * Adds a notifier to an atomic notifier chain.
*
* Currently always returns zero.
*/
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_unregister(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ synchronize_rcu();
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ * atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an atomic context, so they must not block.
+ * This routine uses RCU to synchronize with changes to the chain.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
-int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v)
{
- write_lock(&notifier_lock);
- while(*list)
- {
- if(n->priority > (*list)->priority)
- break;
- list= &((*list)->next);
- }
- n->next = *list;
- *list=n;
- write_unlock(&notifier_lock);
- return 0;
+ int ret;
+
+ rcu_read_lock();
+ ret = notifier_call_chain(&nh->head, val, v);
+ rcu_read_unlock();
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_register);
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
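For illustration only (not part of the patch), a client of the new atomic API might look roughly like this; the chain head, callback, and event value are hypothetical:

#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical chain head and callback; the callback must not sleep
 * because atomic_notifier_call_chain() may run in atomic context. */
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);

static int example_event(struct notifier_block *nb, unsigned long val, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 0,	/* higher priority entries are called first */
};

static int __init example_init(void)
{
	return atomic_notifier_chain_register(&example_atomic_chain, &example_nb);
}

static void example_fire(void)
{
	/* Traversal is RCU-protected; safe from interrupt context. */
	atomic_notifier_call_chain(&example_atomic_chain, 0, NULL);
}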
+
+/*
+ * Blocking notifier chain routines. All access to the chain is
+ * synchronized by an rwsem.
+ */
/**
- * notifier_chain_unregister - Remove notifier from a notifier chain
- * @nl: Pointer to root list pointer
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @n: New entry in notifier chain
*
- * Removes a notifier from a notifier chain.
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
*
- * Returns zero on success, or %-ENOENT on failure.
+ * Currently always returns zero.
*/
-int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
{
- write_lock(&notifier_lock);
- while((*nl)!=NULL)
- {
- if((*nl)==n)
- {
- *nl=n->next;
- write_unlock(&notifier_lock);
- return 0;
- }
- nl=&((*nl)->next);
- }
- write_unlock(&notifier_lock);
- return -ENOENT;
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_register(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
+
+/**
+ * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a blocking notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_unregister(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_unregister);
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
- * notifier_call_chain - Call functions in a notifier chain
- * @n: Pointer to root pointer of notifier chain
+ * blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
- * Calls each function in a notifier chain in turn.
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
*
- * If the return value of the notifier can be and'd
- * with %NOTIFY_STOP_MASK, then notifier_call_chain
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
* will return immediately, with the return value of
* the notifier function which halted execution.
- * Otherwise, the return value is the return value
+ * Otherwise the return value is the return value
* of the last notifier function called.
*/
-int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v)
{
- int ret=NOTIFY_DONE;
- struct notifier_block *nb = *n;
+ int ret;
- while(nb)
- {
- ret=nb->notifier_call(nb,val,v);
- if(ret&NOTIFY_STOP_MASK)
- {
- return ret;
- }
- nb=nb->next;
- }
+ down_read(&nh->rwsem);
+ ret = notifier_call_chain(&nh->head, val, v);
+ up_read(&nh->rwsem);
return ret;
}
-EXPORT_SYMBOL(notifier_call_chain);
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
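A minimal sketch of the blocking variant, again hypothetical and not part of the patch: callbacks run in process context and may sleep, and returning %NOTIFY_STOP (which has %NOTIFY_STOP_MASK set) halts traversal:

#include <linux/init.h>
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);

static int example_event(struct notifier_block *nb, unsigned long val, void *data)
{
	if (val == 0)
		return NOTIFY_STOP;	/* stop calling further entries */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static int __init example_init(void)
{
	/* Must be called from process context; takes the chain's rwsem. */
	return blocking_notifier_chain_register(&example_blocking_chain, &example_nb);
}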
+
+/*
+ * Raw notifier chain routines. There is no protection;
+ * the caller must provide it. Use at your own risk!
+ */
+
+/**
+ * raw_notifier_chain_register - Add notifier to a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Currently always returns zero.
+ */
+
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_register(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_unregister(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ * raw_notifier_call_chain - Call functions in a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an undefined context.
+ * All locking must be provided by the caller.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return notifier_call_chain(&nh->head, val, v);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
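With raw chains the caller supplies all synchronization. A hedged sketch (the mutex and helpers below are hypothetical) of one way to do that:

#include <linux/mutex.h>
#include <linux/notifier.h>

/* Hypothetical raw chain: the owning subsystem serializes all access itself. */
static RAW_NOTIFIER_HEAD(example_raw_chain);
static DEFINE_MUTEX(example_raw_lock);

int example_raw_register(struct notifier_block *nb)
{
	int ret;

	mutex_lock(&example_raw_lock);
	ret = raw_notifier_chain_register(&example_raw_chain, nb);
	mutex_unlock(&example_raw_lock);
	return ret;
}

void example_raw_fire(unsigned long event)
{
	/* Caller-provided locking: the same mutex also covers traversal. */
	mutex_lock(&example_raw_lock);
	raw_notifier_call_chain(&example_raw_chain, event, NULL);
	mutex_unlock(&example_raw_lock);
}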
+
+/*
+ * SRCU notifier chain routines. Registration and unregistration
+ * use a mutex, and call_chain is synchronized by SRCU (no locks).
+ */
+
+/**
+ * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to an SRCU notifier chain.
+ * Must be called in process context.
+ *
+ * Currently always returns zero.
+ */
+
+int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_register(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
+
+/**
+ * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an SRCU notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_unregister(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ synchronize_srcu(&nh->srcu);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
+
+/**
+ * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ int ret;
+ int idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+ ret = notifier_call_chain(&nh->head, val, v);
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
+
+/**
+ * srcu_init_notifier_head - Initialize an SRCU notifier head
+ * @nh: Pointer to head of the srcu notifier chain
+ *
+ * Unlike other sorts of notifier heads, SRCU notifier heads require
+ * dynamic initialization. Be sure to call this routine before
+ * calling any of the other SRCU notifier routines for this head.
+ *
+ * If an SRCU notifier head is deallocated, it must first be cleaned
+ * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
+ * per-cpu data (used by the SRCU mechanism) will leak.
+ */
+
+void srcu_init_notifier_head(struct srcu_notifier_head *nh)
+{
+ mutex_init(&nh->mutex);
+ if (init_srcu_struct(&nh->srcu) < 0)
+ BUG();
+ nh->head = NULL;
+}
+
+EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
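Because SRCU heads need dynamic initialization and cleanup, a typical lifecycle might look like the following hypothetical sketch (srcu_cleanup_notifier_head() is the matching cleanup helper from <linux/notifier.h>):

#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical dynamically initialized SRCU chain head. */
static struct srcu_notifier_head example_srcu_chain;

static int __init example_init(void)
{
	srcu_init_notifier_head(&example_srcu_chain);
	return 0;
}

static void example_fire(void)
{
	/* Readers use SRCU; callbacks run in process context and may block. */
	srcu_notifier_call_chain(&example_srcu_chain, 0, NULL);
}

static void __exit example_exit(void)
{
	/* Required before the head goes away, or its SRCU per-cpu data leaks. */
	srcu_cleanup_notifier_head(&example_srcu_chain);
}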
/**
* register_reboot_notifier - Register function to be called at reboot time
* Registers a function with the list of functions
* to be called at reboot time.
*
- * Currently always returns zero, as notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register
* always returns zero.
*/
int register_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_register(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
int unregister_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_unregister(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
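A hedged example of a reboot-notifier client (the callback and its action are hypothetical); with this patch the callback now runs from the blocking chain, in process context:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_reboot_event(struct notifier_block *nb, unsigned long code, void *cmd)
{
	/* code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; sleeping is allowed. */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}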
}
EXPORT_SYMBOL_GPL(emergency_restart);
-void kernel_restart_prepare(char *cmd)
+static void kernel_restart_prepare(char *cmd)
{
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+ blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
}
void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
- if (!cmd) {
+ if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
- } else {
+ else
printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
- }
- printk(".\n");
machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
* Move into place and start executing a preloaded standalone
* executable. If nothing was preloaded return an error.
*/
-void kernel_kexec(void)
+static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
struct kimage *image;
image = xchg(&kexec_image, NULL);
- if (!image) {
+ if (!image)
return;
- }
kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
machine_kexec(image);
#endif
}
-EXPORT_SYMBOL_GPL(kernel_kexec);
void kernel_shutdown_prepare(enum system_states state)
{
- notifier_call_chain(&reboot_notifier_list,
+ blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
system_state = state;
device_shutdown();
if (C_A_D)
schedule_work(&cad_work);
else
- kill_proc(cad_pid, SIGINT, 1);
+ kill_cad_pid(SIGINT, 1);
}
-
/*
* Unprivileged users may change the real gid to the effective gid
* or vice versa. (BSD-style)
(current->sgid == egid) ||
capable(CAP_SETGID))
new_egid = egid;
- else {
+ else
return -EPERM;
- }
}
- if (new_egid != old_egid)
- {
+ if (new_egid != old_egid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
if (retval)
return retval;
- if (capable(CAP_SETGID))
- {
- if(old_egid != gid)
- {
+ if (capable(CAP_SETGID)) {
+ if (old_egid != gid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
- }
- else if ((gid == current->gid) || (gid == current->sgid))
- {
- if(old_egid != gid)
- {
+ } else if ((gid == current->gid) || (gid == current->sgid)) {
+ if (old_egid != gid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
switch_uid(new_user);
- if(dumpclear)
- {
+ if (dumpclear) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
return -EAGAIN;
- if (new_euid != old_euid)
- {
+ if (new_euid != old_euid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
} else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
- if (old_euid != uid)
- {
+ if (old_euid != uid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
return -EAGAIN;
}
if (euid != (uid_t) -1) {
- if (euid != current->euid)
- {
+ if (euid != current->euid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
return -EPERM;
}
if (egid != (gid_t) -1) {
- if (egid != current->egid)
- {
+ if (egid != current->egid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
if (uid == current->uid || uid == current->euid ||
uid == current->suid || uid == current->fsuid ||
- capable(CAP_SETUID))
- {
- if (uid != old_fsuid)
- {
+ capable(CAP_SETUID)) {
+ if (uid != old_fsuid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
if (gid == current->gid || gid == current->egid ||
gid == current->sgid || gid == current->fsgid ||
- capable(CAP_SETGID))
- {
- if (gid != old_fsgid)
- {
+ capable(CAP_SETGID)) {
+ if (gid != old_fsgid) {
current->mm->dumpable = suid_dumpable;
smp_wmb();
}
*/
if (tbuf) {
struct tms tmp;
+ struct task_struct *tsk = current;
+ struct task_struct *t;
cputime_t utime, stime, cutime, cstime;
-#ifdef CONFIG_SMP
- if (thread_group_empty(current)) {
- /*
- * Single thread case without the use of any locks.
- *
- * We may race with release_task if two threads are
- * executing. However, release task first adds up the
- * counters (__exit_signal) before removing the task
- * from the process tasklist (__unhash_process).
- * __exit_signal also acquires and releases the
- * siglock which results in the proper memory ordering
- * so that the list modifications are always visible
- * after the counters have been updated.
- *
- * If the counters have been updated by the second thread
- * but the thread has not yet been removed from the list
- * then the other branch will be executing which will
- * block on tasklist_lock until the exit handling of the
- * other task is finished.
- *
- * This also implies that the sighand->siglock cannot
- * be held by another processor. So we can also
- * skip acquiring that lock.
- */
- utime = cputime_add(current->signal->utime, current->utime);
- stime = cputime_add(current->signal->utime, current->stime);
- cutime = current->signal->cutime;
- cstime = current->signal->cstime;
- } else
-#endif
- {
+ spin_lock_irq(&tsk->sighand->siglock);
+ utime = tsk->signal->utime;
+ stime = tsk->signal->stime;
+ t = tsk;
+ do {
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
+ t = next_thread(t);
+ } while (t != tsk);
- /* Process with multiple threads */
- struct task_struct *tsk = current;
- struct task_struct *t;
+ cutime = tsk->signal->cutime;
+ cstime = tsk->signal->cstime;
+ spin_unlock_irq(&tsk->sighand->siglock);
- read_lock(&tasklist_lock);
- utime = tsk->signal->utime;
- stime = tsk->signal->stime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- t = next_thread(t);
- } while (t != tsk);
-
- /*
- * While we have tasklist_lock read-locked, no dying thread
- * can be updating current->signal->[us]time. Instead,
- * we got their counts included in the live thread loop.
- * However, another thread can come in right now and
- * do a wait call that updates current->signal->c[us]time.
- * To make sure we always see that pair updated atomically,
- * we take the siglock around fetching them.
- */
- spin_lock_irq(&tsk->sighand->siglock);
- cutime = tsk->signal->cutime;
- cstime = tsk->signal->cstime;
- spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
- }
tmp.tms_utime = cputime_to_clock_t(utime);
tmp.tms_stime = cputime_to_clock_t(stime);
tmp.tms_cutime = cputime_to_clock_t(cutime);
asmlinkage long sys_getpgid(pid_t pid)
{
- if (!pid) {
+ if (!pid)
return process_group(current);
- } else {
+ else {
int retval;
struct task_struct *p;
asmlinkage long sys_getsid(pid_t pid)
{
- if (!pid) {
+ if (!pid)
return current->signal->session;
- } else {
+ else {
int retval;
struct task_struct *p;
p = find_task_by_pid(pid);
retval = -ESRCH;
- if(p) {
+ if (p) {
retval = security_task_getsid(p);
if (!retval)
retval = p->signal->session;
asmlinkage long sys_setsid(void)
{
struct task_struct *group_leader = current->group_leader;
- struct pid *pid;
+ pid_t session;
int err = -EPERM;
mutex_lock(&tty_mutex);
write_lock_irq(&tasklist_lock);
- pid = find_pid(PIDTYPE_PGID, group_leader->pid);
- if (pid)
+ /* Fail if I am already a session leader */
+ if (group_leader->signal->leader)
+ goto out;
+
+ session = group_leader->pid;
+ /* Fail if a process group id already exists that equals the
+ * proposed session id.
+ *
+ * Don't check if session id == 1 because kernel threads use this
+ * session id and so the check will always fail and make it so
+ * init cannot successfully call setsid.
+ */
+ if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
goto out;
group_leader->signal->leader = 1;
- __set_special_pids(group_leader->pid, group_leader->pid);
+ __set_special_pids(session, session);
group_leader->signal->tty = NULL;
group_leader->signal->tty_old_pgrp = 0;
err = process_group(group_leader);
group_info->nblocks = nblocks;
atomic_set(&group_info->usage, 1);
- if (gidsetsize <= NGROUPS_SMALL) {
+ if (gidsetsize <= NGROUPS_SMALL)
group_info->blocks[0] = group_info->small_block;
- } else {
+ else {
for (i = 0; i < nblocks; i++) {
gid_t *b;
b = (void *)__get_free_page(GFP_USER);
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
- {
+{
int i;
int count = group_info->ngroups;
int in_group_p(gid_t grp)
{
int retval = 1;
- if (grp != current->fsgid) {
+ if (grp != current->fsgid)
retval = groups_search(current->group_info, grp);
- }
return retval;
}
int in_egroup_p(gid_t grp)
{
int retval = 1;
- if (grp != current->egid) {
+ if (grp != current->egid)
retval = groups_search(current->group_info, grp);
- }
return retval;
}
int errno = 0;
down_read(&uts_sem);
- if (copy_to_user(name,&system_utsname,sizeof *name))
+ if (copy_to_user(name, utsname(), sizeof *name))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- memcpy(system_utsname.nodename, tmp, len);
- system_utsname.nodename[len] = 0;
+ memcpy(utsname()->nodename, tmp, len);
+ utsname()->nodename[len] = 0;
errno = 0;
}
up_write(&uts_sem);
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
- i = 1 + strlen(system_utsname.nodename);
+ i = 1 + strlen(utsname()->nodename);
if (i > len)
i = len;
errno = 0;
- if (copy_to_user(name, system_utsname.nodename, i))
+ if (copy_to_user(name, utsname()->nodename, i))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- memcpy(system_utsname.domainname, tmp, len);
- system_utsname.domainname[len] = 0;
+ memcpy(utsname()->domainname, tmp, len);
+ utsname()->domainname[len] = 0;
errno = 0;
}
up_write(&uts_sem);
task_lock(current->group_leader);
x = current->signal->rlim[resource];
task_unlock(current->group_leader);
- if(x.rlim_cur > 0x7FFFFFFF)
+ if (x.rlim_cur > 0x7FFFFFFF)
x.rlim_cur = 0x7FFFFFFF;
- if(x.rlim_max > 0x7FFFFFFF)
+ if (x.rlim_max > 0x7FFFFFFF)
x.rlim_max = 0x7FFFFFFF;
return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}
* fields when reaping, so a sample either gets all the additions of a
* given child after it's reaped, or none so this sample is before reaping.
*
- * tasklist_lock locking optimisation:
- * If we are current and single threaded, we do not need to take the tasklist
- * lock or the siglock. No one else can take our signal_struct away,
- * no one else can reap the children to update signal->c* counters, and
- * no one else can race with the signal-> fields.
- * If we do not take the tasklist_lock, the signal-> fields could be read
- * out of order while another thread was just exiting. So we place a
- * read memory barrier when we avoid the lock. On the writer side,
- * write memory barrier is implied in __exit_signal as __exit_signal releases
- * the siglock spinlock after updating the signal-> fields.
- *
- * We don't really need the siglock when we access the non c* fields
- * of the signal_struct (for RUSAGE_SELF) even in multithreaded
- * case, since we take the tasklist lock for read and the non c* signal->
- * fields are updated only in __exit_signal, which is called with
- * tasklist_lock taken for write, hence these two threads cannot execute
- * concurrently.
+ * Locking:
+ * We need to take the siglock for CHILDREN, SELF and BOTH
+ * for the cases: current multithreaded, non-current single threaded,
+ * and non-current multithreaded. Thread traversal is now safe with
+ * the siglock held.
+ * Strictly speaking, we do not need to take the siglock if we are current and
+ * single threaded, as no one else can take our signal_struct away, no one
+ * else can reap the children to update signal->c* counters, and no one else
+ * can race with the signal-> fields. If we do not take any lock, the
+ * signal-> fields could be read out of order while another thread was just
+ * exiting. So we should place a read memory barrier when we avoid the lock.
+ * On the writer side, write memory barrier is implied in __exit_signal
+ * as __exit_signal releases the siglock spinlock after updating the signal->
+ * fields. But we don't do this yet to keep things simple.
*
*/
struct task_struct *t;
unsigned long flags;
cputime_t utime, stime;
- int need_lock = 0;
memset((char *) r, 0, sizeof *r);
utime = stime = cputime_zero;
- if (p != current || !thread_group_empty(p))
- need_lock = 1;
-
- if (need_lock) {
- read_lock(&tasklist_lock);
- if (unlikely(!p->signal)) {
- read_unlock(&tasklist_lock);
- return;
- }
- } else
- /* See locking comments above */
- smp_rmb();
+ rcu_read_lock();
+ if (!lock_task_sighand(p, &flags)) {
+ rcu_read_unlock();
+ return;
+ }
switch (who) {
case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
- spin_lock_irqsave(&p->sighand->siglock, flags);
utime = p->signal->cutime;
stime = p->signal->cstime;
r->ru_nvcsw = p->signal->cnvcsw;
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
if (who == RUSAGE_CHILDREN)
break;
BUG();
}
- if (need_lock)
- read_unlock(&tasklist_lock);
+ unlock_task_sighand(p, &flags);
+ rcu_read_unlock();
+
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
}
error = current->mm->dumpable;
break;
case PR_SET_DUMPABLE:
- if (arg2 < 0 || arg2 > 2) {
+ if (arg2 < 0 || arg2 > 1) {
error = -EINVAL;
break;
}
return -EFAULT;
return 0;
}
+ case PR_GET_ENDIAN:
+ error = GET_ENDIAN(current, arg2);
+ break;
+ case PR_SET_ENDIAN:
+ error = SET_ENDIAN(current, arg2);
+ break;
+
default:
error = -EINVAL;
break;
}
return error;
}
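From userspace, the new PR_GET_ENDIAN/PR_SET_ENDIAN options are only meaningful on architectures that provide GET_ENDIAN/SET_ENDIAN (powerpc); elsewhere they return -EINVAL. A hedged sketch, with the option values taken from this era's <linux/prctl.h> and defined locally in case installed headers predate them:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_ENDIAN
#define PR_GET_ENDIAN	19	/* assumed value from <linux/prctl.h> */
#define PR_SET_ENDIAN	20
#endif

int main(void)
{
	int mode;

	/* arg2 is a user pointer the kernel stores the current mode through. */
	if (prctl(PR_GET_ENDIAN, (unsigned long)&mode, 0, 0, 0) == 0)
		printf("endian mode: %d\n", mode);
	else
		perror("PR_GET_ENDIAN");
	return 0;
}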
+
+asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
+ struct getcpu_cache __user *cache)
+{
+ int err = 0;
+ int cpu = raw_smp_processor_id();
+ if (cpup)
+ err |= put_user(cpu, cpup);
+ if (nodep)
+ err |= put_user(cpu_to_node(cpu), nodep);
+ if (cache) {
+ /*
+ * The cache is not needed for this implementation,
+ * but make sure user programs pass something
+ * valid. vsyscall implementations can instead make
+ * good use of the cache. Only use t0 and t1 because
+ * these are available in both 32bit and 64bit ABI (no
+ * need for a compat_getcpu). 32bit has enough
+ * padding.
+ */
+ unsigned long t0, t1;
+ get_user(t0, &cache->blob[0]);
+ get_user(t1, &cache->blob[1]);
+ t0++;
+ t1++;
+ put_user(t0, &cache->blob[0]);
+ put_user(t1, &cache->blob[1]);
+ }
+ return err ? -EFAULT : 0;
+}
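A hedged userspace sketch of calling the new syscall: glibc of this era has no getcpu() wrapper, so the raw syscall is used, and the cache argument may be NULL with this kernel-side implementation (only wired up on architectures that define __NR_getcpu):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu, node;

#ifdef SYS_getcpu
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	else
		perror("getcpu");
#else
	fprintf(stderr, "SYS_getcpu not available on this architecture\n");
#endif
	return 0;
}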