/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL(idle_notifier_unregister);
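/*
 * Example (an illustrative sketch, not part of this file): a driver that
 * wants to know when this CPU enters or leaves idle would hook the chain
 * roughly like this; my_idle_notify, my_idle_nb and do_something are
 * hypothetical names:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			do_something();
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 */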
enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;

void enter_idle(void)
{
	__get_cpu_var(idle_state) = CPU_IDLE;
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* The idle loop runs with pid 0; the bitwise | is intentional,
	   it is cheaper than || here. */
	if (current->pid | read_pda(irqcount))
		return;
	__exit_idle();
}
/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
	local_irq_enable();

	current_thread_info()->status &= ~TS_POLLING;
	smp_mb__after_clear_bit();
	while (!need_resched()) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
	current_thread_info()->status |= TS_POLLING;
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	local_irq_enable();

	asm volatile(
		"2:"
		"testl %0,%1;"
		"rep; nop;"
		"je 2b;"
		: :
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
					!per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
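/*
 * Usage sketch (illustrative, not from this file): code that installs a
 * different idle handler uses cpu_idle_wait() to make sure no CPU is
 * still executing the old one; my_new_idle is a hypothetical handler:
 *
 *	pm_idle = my_new_idle;
 *	cpu_idle_wait();
 */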
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			enter_idle();
			idle();
			__exit_idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (need_resched())
			break;
		__mwait(0, 0);
	}
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait.
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
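/*
 * Usage note: booting with "idle=poll" on the kernel command line selects
 * poll_idle above. Any "idle=" option also sets boot_option_idle_override,
 * which is exported so other code can tell that the user forced an idle
 * policy.
 */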
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1));
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING) {
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
		if (t->flags & _TIF_IA32)
			current_thread_info()->status |= TS_COMPAT;
	}

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
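/*
 * Note on childregs->r8 above (a calling-convention sketch, not new
 * behaviour): in the 64-bit clone() system call the tls argument is the
 * fifth parameter, and the fifth syscall argument arrives in %r8, so for
 * a native 64-bit CLONE_SETTLS caller the new FS base is found there.
 */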
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
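/*
 * For example, loaddebug(next, 7) expands to
 * set_debugreg(next->debugreg7, 7), i.e. it loads the saved value of
 * debug register 7 back into the hardware register.
 */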
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 * The bitwise | below is intentional: cheaper than || here.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);

	/* This must be here to ensure both math_state_restore() and
	   kernel_fpu_begin() work consistently.
	   And the AMD workaround requires it to be after DS reload. */
	unlazy_fpu(prev_p);
	write_pda(kernelstack,
		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
			       max(prev->io_bitmap_max, next->io_bitmap_max));
		else {
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		       NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state==TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			asm("movl %%gs,%0" : "=r" (gsindex));
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		}
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
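/*
 * Example (an illustrative user-space sketch, not part of this file): a
 * 64-bit threading library would set its TLS base with something like
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *
 * where tls_block is a hypothetical pointer to the thread's TLS area.
 */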
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
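/*
 * Worked example (assuming the return statement restored above): the
 * result is a 16-byte aligned value at most 8191 bytes below the given
 * stack top, i.e. stack placement is randomized over roughly 8KB in
 * 16-byte steps (512 possible positions).
 */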