 * linux/arch/x86-64/kernel/process.c
 * Copyright (C) 1995 Linus Torvalds
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 * CPU hotplug support - ashok.raj@intel.com
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 * This file handles the architecture-dependent parts of process handling..
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/proto.h>
asmlinkage extern void ret_from_fork(void);
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
 * Power management idle function, if any.
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
static struct notifier_block *idle_notifier;
static DEFINE_SPINLOCK(idle_notifier_lock);
void idle_notifier_register(struct notifier_block *n)
spin_lock_irqsave(&idle_notifier_lock, flags);
notifier_chain_register(&idle_notifier, n);
spin_unlock_irqrestore(&idle_notifier_lock, flags);
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
spin_lock_irqsave(&idle_notifier_lock, flags);
notifier_chain_unregister(&idle_notifier, n);
spin_unlock_irqrestore(&idle_notifier_lock, flags);
EXPORT_SYMBOL(idle_notifier_unregister);
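/*
 * A minimal sketch of a consumer of this interface, assuming the
 * IDLE_START/IDLE_END events posted below are visible to the caller
 * (e.g. via <asm/idle.h>). Illustrative only, not compiled here.
 */
#if 0
#include <linux/notifier.h>

static int example_idle_event(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	if (action == IDLE_START)
		printk(KERN_DEBUG "cpu entering idle\n");
	else if (action == IDLE_END)
		printk(KERN_DEBUG "cpu leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_event,
};

static int __init example_idle_init(void)
{
	idle_notifier_register(&example_idle_nb);
	return 0;
}
#endif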
enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
__get_cpu_var(idle_state) = CPU_IDLE;
notifier_call_chain(&idle_notifier, IDLE_START, NULL);
static void __exit_idle(void)
__get_cpu_var(idle_state) = CPU_NOT_IDLE;
notifier_call_chain(&idle_notifier, IDLE_END, NULL);
/* Called from interrupts to signify idle end */
if (current->pid | read_pda(irqcount))
 * We use this if we don't have any better
static void default_idle(void)
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
while (!need_resched()) {
set_thread_flag(TIF_POLLING_NRFLAG);
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
static void poll_idle (void)
"i" (_TIF_NEED_RESCHED),
"m" (current_thread_info()->flags));
void cpu_idle_wait(void)
unsigned int cpu, this_cpu = get_cpu();
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
for_each_online_cpu(cpu) {
per_cpu(cpu_idle_state, cpu) = 1;
__get_cpu_var(cpu_idle_state) = 0;
for_each_online_cpu(cpu) {
if (cpu_isset(cpu, map) &&
!per_cpu(cpu_idle_state, cpu))
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
EXPORT_SYMBOL_GPL(cpu_idle_wait);
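/*
 * Sketch of the intended use of cpu_idle_wait(): code that installs a new
 * pm_idle handler calls it so that no CPU can still be executing the old
 * one (roughly the pattern used by the ACPI processor idle driver). The
 * helper name below is hypothetical.
 */
#if 0
static void example_switch_idle_handler(void (*new_idle)(void))
{
	pm_idle = new_idle;
	/* wait until every online CPU has gone through its idle loop once */
	cpu_idle_wait();
}
#endif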
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
__get_cpu_var(cpu_state) = CPU_DEAD;
static inline void play_dead(void)
#endif /* CONFIG_HOTPLUG_CPU */
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (!need_resched()) {
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
if (cpu_is_offline(smp_processor_id()))
preempt_enable_no_resched();
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
static void mwait_idle(void)
while (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_MWAIT)) {
 * Skip, if setup has overridden idle.
 * One CPU supports mwait => All CPUs support mwait
printk("using mwait in idle threads.\n");
pm_idle = mwait_idle;
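/*
 * User-space analogue of the X86_FEATURE_MWAIT test above: CPUID leaf 1
 * reports MONITOR/MWAIT support in ECX bit 3. Sketch using GCC's <cpuid.h>;
 * the MWAIT idle loop itself is privileged and only entered by the kernel.
 */
#if 0
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & (1u << 3)))
		printf("CPU advertises MONITOR/MWAIT\n");
	else
		printf("no MONITOR/MWAIT\n");
	return 0;
}
#endif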
static int __init idle_setup (char *str)
if (!strncmp(str, "poll", 4)) {
printk("using polling idle threads.\n");
boot_option_idle_override = 1;
__setup("idle=", idle_setup);
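/*
 * idle_setup() is wired to the "idle=" boot parameter: passing "idle=poll"
 * on the kernel command line selects poll_idle() and sets
 * boot_option_idle_override so later CPU setup leaves the choice alone.
 * Example bootloader entry (illustrative):
 *
 *   kernel /vmlinuz root=/dev/sda1 idle=poll
 */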
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned int fsindex,gsindex;
unsigned int ds,cs,es;
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
current->pid, current->comm, print_tainted(),
system_utsname.release,
(int)strcspn(system_utsname.version, " "),
system_utsname.version);
printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
printk_address(regs->rip);
printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->rax, regs->rbx, regs->rcx);
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
regs->rdx, regs->rsi, regs->rdi);
printk("RBP: %016lx R08: %016lx R09: %016lx\n",
regs->rbp, regs->r8, regs->r9);
printk("R10: %016lx R11: %016lx R12: %016lx\n",
regs->r10, regs->r11, regs->r12);
printk("R13: %016lx R14: %016lx R15: %016lx\n",
regs->r13, regs->r14, regs->r15);
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
rdmsrl(MSR_FS_BASE, fs);
rdmsrl(MSR_GS_BASE, gs);
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
asm("movq %%cr0, %0": "=r" (cr0));
asm("movq %%cr2, %0": "=r" (cr2));
asm("movq %%cr3, %0": "=r" (cr3));
asm("movq %%cr4, %0": "=r" (cr4));
printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
fs,fsindex,gs,gsindex,shadowgs);
printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
void show_regs(struct pt_regs *regs)
printk("CPU %d:", smp_processor_id());
show_trace(&regs->rsp);
 * Free current thread data structures etc..
void exit_thread(void)
struct task_struct *me = current;
struct thread_struct *t = &me->thread;
 * Remove function-return probe instances associated with this task
 * and put them back on the free list. Do not insert an exit probe for
 * this function, it will be disabled by kprobe_flush_task if you do.
kprobe_flush_task(me);
if (me->thread.io_bitmap_ptr) {
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
 * Careful, clear this in the TSS too:
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t->io_bitmap_max = 0;
void flush_thread(void)
struct task_struct *tsk = current;
struct thread_info *t = current_thread_info();
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0;
tsk->thread.debugreg2 = 0;
tsk->thread.debugreg3 = 0;
tsk->thread.debugreg6 = 0;
tsk->thread.debugreg7 = 0;
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 * Forget coprocessor state..
void release_thread(struct task_struct *dead_task)
if (dead_task->mm->context.size) {
printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
dead_task->mm->context.ldt,
dead_task->mm->context.size);
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
struct user_desc ud = {
struct n_desc_struct *desc = (void *)t->thread.tls_array;
desc->a = LDT_entry_a(&ud);
desc->b = LDT_entry_b(&ud);
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
struct desc_struct *desc = (void *)t->thread.tls_array;
(((u32)desc->base1) << 16) |
(((u32)desc->base2) << 24);
 * This gets called before we allocate a new thread and copy
 * the current task into it.
void prepare_to_copy(struct task_struct *tsk)
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
struct pt_regs * childregs;
struct task_struct *me = current;
childregs = ((struct pt_regs *)
(THREAD_SIZE + task_stack_page(p))) - 1;
childregs->rsp = rsp;
childregs->rsp = (unsigned long)childregs;
p->thread.rsp = (unsigned long) childregs;
p->thread.rsp0 = (unsigned long) (childregs+1);
p->thread.userrsp = me->thread.userrsp;
set_tsk_thread_flag(p, TIF_FORK);
p->thread.fs = me->thread.fs;
p->thread.gs = me->thread.gs;
asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
asm("mov %%es,%0" : "=m" (p->thread.es));
asm("mov %%ds,%0" : "=m" (p->thread.ds));
if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
 * Set a new TLS for the child thread?
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32))
err = ia32_child_tls(p, childregs);
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
 * This special macro can be used to load a debugging register
#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
 * switch_to(x,y) should switch tasks from x to y.
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 * Kprobes not supported here. Set the probe on schedule instead.
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
 * Reload esp0, LDT and the page table pointer:
tss->rsp0 = next->rsp0;
 * This won't pick up thread selector changes, but I guess that is ok.
asm volatile("mov %%es,%0" : "=m" (prev->es));
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
asm volatile("movl %%fs,%0" : "=r" (fsindex));
/* segment register != 0 always requires a reload.
also reload when it has changed.
when prev process used 64bit base always reload
to avoid an information leak. */
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/* check if the user used a selector != 0
* if yes clear 64bit base, since overloaded base
* is always mapped to the Null selector
/* when next process has a 64bit base use it */
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
asm volatile("movl %%gs,%0" : "=r" (gsindex));
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
prev->gsindex = gsindex;
 * Switch the PDA and FPU contexts.
prev->userrsp = read_pda(oldrsp);
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
/* This must be here to ensure both math_state_restore() and
kernel_fpu_begin() work consistently. */
write_pda(kernelstack,
task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
 * Now maybe reload the debug registers
if (unlikely(next->debugreg7)) {
 * Handle the IO bitmap
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
if (next->io_bitmap_ptr)
 * Copy the relevant range of the IO bitmap.
 * Normally this is 128 bytes or less:
memcpy(tss->io_bitmap, next->io_bitmap_ptr,
max(prev->io_bitmap_max, next->io_bitmap_max));
 * Clear any possible leftover bits:
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
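/*
 * The io_bitmap handling above exists for tasks granted port access with
 * ioperm(2): the per-thread bitmap created by that syscall must be copied
 * into the shared TSS whenever such a task is switched in. Hedged
 * user-space sketch of what creates the bitmap (needs CAP_SYS_RAWIO; the
 * parallel-port range is just an example):
 */
#if 0
#include <sys/io.h>
#include <stdio.h>

int main(void)
{
	/* ask the kernel for access to I/O ports 0x378..0x37a */
	if (ioperm(0x378, 3, 1) < 0) {
		perror("ioperm");
		return 1;
	}
	outb(0x00, 0x378);	/* port access is now legal from user space */
	return 0;
}
#endif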
 * sys_execve() executes a new program.
long sys_execve(char __user *name, char __user * __user *argv,
char __user * __user *envp, struct pt_regs regs)
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
error = do_execve(filename, argv, envp, &regs);
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
void set_personality_64bit(void)
/* inherit personality from parent */
/* Make sure to be in 64bit mode */
clear_thread_flag(TIF_IA32);
/* TBD: overwrites user setup. Should have two bits.
But 64bit processes have always behaved this way,
so it's not too bad. The main problem is just that
32bit children are affected again. */
current->personality &= ~READ_IMPLIES_EXEC;
asmlinkage long sys_fork(struct pt_regs *regs)
return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
asmlinkage long sys_vfork(struct pt_regs *regs)
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
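/*
 * From user space the only safe pattern with vfork(2) is the one sketched
 * below: the child borrows the parent's address space and stack until it
 * calls execve() or _exit(), so it must do nothing else in between.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = vfork();

	if (pid == 0) {			/* child: exec immediately */
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);		/* reached only if exec failed */
	}
	/* parent resumes here once the child has exec'd or exited */
	return pid > 0 ? 0 : 1;
}
#endif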
unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state==TASK_RUNNING)
stack = (unsigned long)task_stack_page(p);
if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
fp = *(u64 *)(p->thread.rsp);
if (fp < (unsigned long)stack ||
fp > (unsigned long)stack+THREAD_SIZE)
rip = *(u64 *)(fp+8);
if (!in_sched_functions(rip))
} while (count++ < 16);
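/*
 * The address computed by get_wchan() is what procfs exposes as a task's
 * wait channel (symbolized in /proc/<pid>/wchan, also shown by
 * "ps -o pid,wchan"). Hedged user-space sketch of reading it:
 */
#if 0
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], buf[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/wchan",
		 argc > 1 ? argv[1] : "1");
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("wchan: %s\n", buf);
	fclose(f);
	return 0;
}
#endif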
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
int doit = task == current;
if (addr >= TASK_SIZE_OF(task))
/* handle small bases via the GDT because that's faster to
if (addr <= 0xffffffff) {
set_32bit_tls(task, GS_TLS, addr);
load_TLS(&task->thread, cpu);
load_gs_index(GS_TLS_SEL);
task->thread.gsindex = GS_TLS_SEL;
task->thread.gsindex = 0;
task->thread.gs = addr;
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
/* Not strictly needed for fs, but do it for symmetry
if (addr >= TASK_SIZE_OF(task))
/* handle small bases via the GDT because that's faster to
if (addr <= 0xffffffff) {
set_32bit_tls(task, FS_TLS, addr);
load_TLS(&task->thread, cpu);
asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
task->thread.fsindex = FS_TLS_SEL;
task->thread.fsindex = 0;
task->thread.fs = addr;
/* set the selector to 0 to not confuse
asm volatile("movl %0,%%fs" :: "r" (0));
ret = checking_wrmsrl(MSR_FS_BASE, addr);
if (task->thread.fsindex == FS_TLS_SEL)
base = read_32bit_tls(task, FS_TLS);
rdmsrl(MSR_FS_BASE, base);
base = task->thread.fs;
ret = put_user(base, (unsigned long __user *)addr);
if (task->thread.gsindex == GS_TLS_SEL)
base = read_32bit_tls(task, GS_TLS);
rdmsrl(MSR_KERNEL_GS_BASE, base);
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
long sys_arch_prctl(int code, unsigned long addr)
return do_arch_prctl(current, code, addr);
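/*
 * Hedged user-space sketch of the interface implemented above: reading the
 * calling thread's FS base with ARCH_GET_FS from <asm/prctl.h>, invoked via
 * syscall(2) since older glibc provides no dedicated wrapper.
 */
#if 0
#include <asm/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	unsigned long fsbase = 0;

	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
		printf("FS base: %#lx\n", fsbase);
	return 0;
}
#endif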
 * Capture the user space registers if the task is not running (in user space)
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
struct pt_regs *pp, ptregs;
pp = task_pt_regs(tsk);
elf_core_copy_regs(regs, &ptregs);
unsigned long arch_align_stack(unsigned long sp)
if (randomize_va_space)
sp -= get_random_int() % 8192;