/*
 *  linux/arch/x86_64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);
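
/*
 * Default flags for kernel_thread(): the child shares the kernel address
 * space (CLONE_VM) and can never be forcibly traced (CLONE_UNTRACED).
 */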
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL(idle_notifier_unregister);
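
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 *
 *	static int my_idle_event(struct notifier_block *nb,
 *				 unsigned long action, void *unused)
 *	{
 *		// action is IDLE_START or IDLE_END
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_event,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 */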
void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
/*
 * We use this if we don't have any better
 * idle routine.
 */
static void default_idle(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb__after_clear_bit();
	while (!need_resched()) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
	current_thread_info()->status |= TS_POLLING;
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	local_irq_enable();

	asm volatile(
		"2:"
		"testl %0,%1;"
		"rep; nop;"
		"je 2b;"
		: :
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
					!per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
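
/*
 * cpu_idle_wait() is used when pm_idle is changed at runtime: it makes
 * every online CPU pass through the idle loop once (clearing its
 * cpu_idle_state flag there), so the old idle routine is no longer
 * executing anywhere before the caller proceeds.
 */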
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			enter_idle();
			idle();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		mwait_idle_with_hints(0, 0);
}
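
/*
 * The hint encoding is defined by the MONITOR/MWAIT extensions: callers
 * such as ACPI idle pass a target C-state hint in eax and break-event
 * flags in ecx. The specific hint values are chosen by the caller, not
 * defined in this file.
 */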
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait.
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
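
/*
 * Boot-time usage: "idle=poll" on the kernel command line selects
 * poll_idle above and sets boot_option_idle_override so later CPU setup
 * won't install another idle routine.
 */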
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1));
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING) {
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
		if (t->flags & _TIF_IA32)
			current_thread_info()->status |= TS_COMPAT;
	}
	t->flags &= ~_TIF_DEBUG;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
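
/*
 * A GDT descriptor scatters the 32-bit base address across three fields,
 * which read_32bit_tls() reassembles:
 *	base0 = base bits  0..15
 *	base1 = base bits 16..23
 *	base2 = base bits 24..31
 */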

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
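/*
 * The ## operator pastes tokens, so loaddebug(next, 6) expands to
 * set_debugreg(next->debugreg6, 6).
 */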

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;

		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;

		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		(unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		    NULL, NULL);
}
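
/*
 * get_wchan() reports where a sleeping task blocked: it walks the saved
 * frame pointers on the task's kernel stack and returns the first return
 * address that is not within the scheduler itself.
 */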
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			asm("movl %%gs,%0" : "=r" (gsindex));
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
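
/*
 * Usage sketch (hypothetical userland code, not part of this file):
 * a 64-bit threading library would point %fs at its TLS block with
 *
 *	arch_prctl(ARCH_SET_FS, (unsigned long)tls_block);
 *
 * and read the base back via ARCH_GET_FS, passing a pointer in addr.
 */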

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
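
/*
 * arch_align_stack() below perturbs the initial user stack pointer by up
 * to 8 KB when stack randomization is enabled, then rounds down to a
 * 16-byte boundary as the ABI requires.
 */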
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}