/*
 * linux/arch/x86_64/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 *
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/proto.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	atomic_inc(&hlt_counter);
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	atomic_dec(&hlt_counter);
}
EXPORT_SYMBOL(enable_hlt);
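
/*
 * disable_hlt()/enable_hlt() let callers temporarily forbid use of hlt
 * in the idle loop: as long as hlt_counter is non-zero, default_idle()
 * below takes the polling path instead of halting the CPU.
 */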
/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	if (!atomic_read(&hlt_counter)) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		while (!need_resched()) {

		set_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		while (!need_resched())
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
			"i" (_TIF_NEED_RESCHED),
			"m" (current_thread_info()->flags));
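	/*
	 * The two operands above feed the (partially elided) inline asm
	 * loop that keeps re-testing the TIF_NEED_RESCHED bit in
	 * current_thread_info()->flags, so a polling CPU notices a
	 * reschedule request without waiting for an IPI.
	 */

/*
 * cpu_idle_wait() forces every online CPU through its idle loop once:
 * it sets the per-cpu cpu_idle_state flag for each CPU and then spins
 * until each idle CPU has observed the flag and cleared it again (the
 * idle loop below is what clears cpu_idle_state).
 */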
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));

	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;

	__get_cpu_var(cpu_idle_state) = 0;

		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))

		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We don't actually take the CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	__get_cpu_var(cpu_state) = CPU_DEAD;

#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

		preempt_enable_no_resched();
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
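		/*
		 * __monitor() arms address monitoring on the cache line
		 * holding the thread flags; a store that sets
		 * TIF_NEED_RESCHED there wakes the MWAIT that presumably
		 * follows in the elided remainder of this loop, as the
		 * comment above describes.
		 */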
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait.
		 */
		printk("using mwait in idle threads.\n");
		pm_idle = mwait_idle;

static int __init idle_setup(char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}
__setup("idle=", idle_setup);
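
/*
 * Usage note: booting with "idle=poll" on the kernel command line selects
 * the busy-polling poll_idle() routine instead of the default hlt-based
 * idle; boot_option_idle_override records that the idle routine was forced
 * from the command line (cf. the "Skip, if setup has overridden idle"
 * comment above).
 */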
/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe
	 * for this function; it will be disabled by kprobe_flush_task if
	 * you do.
	 */
	kprobe_flush_task(me);

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe
	 * for this function; it will be disabled by kprobe_flush_task if
	 * you do.
	 */
	kprobe_flush_task(tsk);
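
	/*
	 * _TIF_ABI_PENDING is left set by an exec that needs the other
	 * ABI; XOR-ing both bits below clears the pending flag and flips
	 * _TIF_IA32 so the thread now runs under the requested ABI.
	 */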
	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm->context.size) {
		printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
			dead_task->comm,
			dead_task->mm->context.ldt,
			dead_task->mm->context.size);
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;

	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_ti_thread_flag(p->thread_info, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
	}

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
/*
 * This special macro can be used to load a debugging register.
 */
#define loaddebug(thread, r) set_debug(thread->debugreg ## r, r)
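
/*
 * For example, loaddebug(next, 7) expands to
 * set_debug(next->debugreg7, 7), i.e. it loads the saved value of the
 * task's debug register 7 back into the hardware register.
 */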
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);
	asm volatile("movl %%fs,%0" : "=r" (fsindex));
	/*
	 * A segment register != 0 always requires a reload.
	 * Also reload when it has changed.
	 * When the previous process used a 64-bit base, always reload
	 * to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes, clear
		 * the 64-bit base, since the overloaded base is always
		 * mapped to the NULL selector.
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* When the next process has a 64-bit base, use it. */
	wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	asm volatile("movl %%gs,%0" : "=r" (gsindex));
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);

	wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;
	/*
	 * Switch the PDA context.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack,
		  (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
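	/*
	 * On x86-64 the PDA is how per-CPU state is reached from entry
	 * code: pcurrent is what current resolves to, and kernelstack is
	 * the stack pointer the syscall entry path installs, so both must
	 * be switched to the incoming task here along with the saved
	 * user RSP.
	 */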
	/*
	 * Now maybe reload the debug registers.
	 */
	if (unlikely(next->debugreg7)) {

	/*
	 * Handle the IO bitmap.
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
			       max(prev->io_bitmap_max, next->io_bitmap_max));
		else
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
/*
 * sys_execve() executes a new program.
 */
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64-bit mode */
	clear_thread_flag(TIF_IA32);

	/*
	 * TBD: This overwrites the user's setup. Should have two bits.
	 * But 64-bit processes have always behaved this way, so it's not
	 * too bad. The main problem is just that 32-bit children are
	 * affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			  void __user *parent_tid, void __user *child_tid,
			  struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite non-obvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		       NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		/*
		 * Handle small bases via the GDT because that's faster
		 * to switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			task->thread.gsindex = GS_TLS_SEL;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs.
		 */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		/*
		 * Handle small bases via the GDT because that's faster
		 * to switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
			task->thread.fsindex = FS_TLS_SEL;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
				/*
				 * Set the selector to 0 to not confuse
				 * __switch_to.
				 */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
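
/*
 * Illustrative only: from user space these operations are reached through
 * the arch_prctl() system call, e.g. roughly
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *
 * where tls_block is a hypothetical user buffer. ARCH_SET_FS is the code
 * copy_thread() uses above; the matching ARCH_SET_GS/ARCH_GET_FS/ARCH_GET_GS
 * codes are assumed to come from <asm/prctl.h> alongside it.
 */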
/*
 * Capture the user space registers if the task is not running (in user
 * space).
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);

	elf_core_copy_regs(regs, &ptregs);
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}