/*
 *  linux/arch/x86_64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/proto.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
{
	atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);
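
/*
 * Usage sketch (not from this file): a driver that cannot tolerate the CPU
 * sitting in HLT while a fragile operation is in flight might bracket that
 * region with the exported pair above. "my_start_transfer" and
 * "my_wait_for_completion" are hypothetical placeholders.
 *
 *	disable_hlt();
 *	my_start_transfer(dev);
 *	my_wait_for_completion(dev);
 *	enable_hlt();
 */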
/*
 * We use this if we don't have any better
 * idle routine.
 */
void default_idle(void)
{
	if (!atomic_read(&hlt_counter)) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		asm volatile("2: testl %0,%1; rep; nop; je 2b;"
			     : :
			     "i" (_TIF_NEED_RESCHED),
			     "m" (current_thread_info()->flags));
	} else
		set_need_resched();
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
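
/*
 * Usage sketch (assumption, not from this file): code that installs a new
 * pm_idle handler typically calls cpu_idle_wait() afterwards so that no CPU
 * is still executing the old routine. "my_new_idle" is a hypothetical
 * replacement idle function.
 *
 *	pm_idle = my_new_idle;
 *	cpu_idle_wait();
 */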
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* Ack the offline request ... */
	__get_cpu_var(cpu_state) = CPU_DEAD;
	/* ... then sit in a tight loop with interrupts off. */
	local_irq_disable();
	for (;;)
		cpu_relax();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;
			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			idle();
		}
		schedule();
	}
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	if (!need_resched()) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		do {
			__monitor((void *)&current_thread_info()->flags, 0, 0);
			if (need_resched())
				break;
			__mwait(0, 0);
		} while (!need_resched());
		clear_thread_flag(TIF_POLLING_NRFLAG);
	}
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup(char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
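
/*
 * Usage sketch: the "idle=" option is given on the kernel command line, for
 * example from the boot loader (the root= value below is purely
 * illustrative):
 *
 *	kernel /vmlinuz root=/dev/sda1 idle=poll
 *
 * "idle=poll" selects poll_idle() above; any "idle=" value also sets
 * boot_option_idle_override, so later CPU setup won't switch to mwait_idle().
 */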
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("Pid: %d, comm: %.20s %s %s\n",
	       current->pid, current->comm, print_tainted(), system_utsname.release);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(me);

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(tsk);

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_ti_thread_flag(p->thread_info, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
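
/*
 * Expansion sketch: with the token pasting above, a call such as
 *
 *	loaddebug(next, 7);
 *
 * expands to set_debug(next->debugreg7, 7), i.e. it loads the saved value of
 * debug register 7 for the incoming task back into the hardware register.
 */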
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	unlazy_fpu(prev_p);
	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);
	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}
	/*
	 * Switch the PDA context.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
				max(prev->io_bitmap_max, next->io_bitmap_max));
		else {
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	return next_p;
}
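
/*
 * Context sketch (assumption, not from this file): the per-thread
 * io_bitmap_ptr handled above is normally set up by the ioperm() system
 * call. A user space program might exercise it roughly like this:
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 3, 1) == 0)
 *		outb(0xff, 0x378);
 *
 * (ioperm grants access to ports 0x378-0x37a; outb then writes the port
 * directly, without a system call.) Once a task carries an I/O bitmap, every
 * context switch to or from it goes through the copy/clear path above.
 */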
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
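
/*
 * Related usage sketch (assumption, not from this file): READ_IMPLIES_EXEC is
 * normally requested from user space via the personality() system call before
 * an exec, which is exactly the bit this function clears again for 64bit
 * binaries:
 *
 *	#include <sys/personality.h>
 *
 *	personality(personality(0xffffffff) | READ_IMPLIES_EXEC);
 *
 * (Passing 0xffffffff queries the current personality without changing it.)
 */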
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		       NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
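
/*
 * Usage note: get_wchan() is what backs the per-task "wchan" value exported
 * through procfs, e.g. (illustrative shell session, output assumed):
 *
 *	$ cat /proc/1/wchan
 *	select
 *
 * The symbol shown there is resolved from the return address this loop finds
 * once it has walked the stack frames out of the scheduler functions.
 */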
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
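
/*
 * Usage sketch (assumption, not from this file): user space reaches
 * do_arch_prctl() through the arch_prctl(2) system call, e.g. to read back
 * the current FS base:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
 *
 * With ARCH_GET_FS/ARCH_GET_GS the "addr" argument is the user pointer the
 * result is stored through; with ARCH_SET_FS/ARCH_SET_GS it is the new base.
 */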
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);
	--pp;

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}