/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
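
/*
 * hlt_counter lets drivers temporarily veto the use of HLT in the idle
 * loop (see default_idle() below): disable_hlt() takes a reference and
 * enable_hlt() drops it again.
 */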
void disable_hlt(void)
{
        atomic_inc(&hlt_counter);
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        atomic_dec(&hlt_counter);
}
EXPORT_SYMBOL(enable_hlt);
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (!atomic_read(&hlt_counter)) {
                local_irq_disable();
                if (!need_resched())
                        safe_halt();
                else
                        local_irq_enable();
        }
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        int oldval;

        local_irq_enable();
        /*
         * Deal with another CPU just having chosen a thread to
         * run here:
         */
        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

        if (!oldval) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                asm volatile(
                        "2: testl %0,%1; rep; nop; je 2b;"
                        : :
                        "i" (_TIF_NEED_RESCHED),
                        "m" (current_thread_info()->flags));
                clear_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                set_need_resched();
        }
}
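
/*
 * cpu_idle_wait() makes sure every online CPU has left the idle routine
 * it was running (used after pm_idle changes): mark each CPU's
 * cpu_idle_state, then poll until the idle loop on every CPU has cleared
 * its flag again.
 */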
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        idle_task_exit();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        while (1)
                safe_halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;
                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        idle();
                }
                schedule();
        }
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        if (!need_resched()) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                do {
                        __monitor((void *)&current_thread_info()->flags, 0, 0);
                        if (need_resched())
                                break;
                        __mwait(0, 0);
                } while (!need_resched());
                clear_thread_flag(TIF_POLLING_NRFLAG);
        }
}
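
/*
 * Pick the idle routine for this CPU at boot: switch pm_idle to
 * mwait_idle when the CPU advertises MWAIT, unless an "idle=" boot
 * option already installed something else.
 */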
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        static int printed;

        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => all CPUs support mwait.
                 */
                if (!pm_idle) {
                        if (!printed) {
                                printk("using mwait in idle threads.\n");
                                printed = 1;
                        }
                        pm_idle = mwait_idle;
                }
        }
}
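
/*
 * "idle=" boot option: "idle=poll" selects the polling idle loop; any
 * "idle=" setting also records boot_option_idle_override so later code
 * knows the user made an explicit choice.
 */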
static int __init idle_setup(char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}

__setup("idle=", idle_setup);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned int fsindex,gsindex;
        unsigned int ds,cs,es;

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        asm("movq %%cr0, %0": "=r" (cr0));
        asm("movq %%cr2, %0": "=r" (cr2));
        asm("movq %%cr3, %0": "=r" (cr3));
        asm("movq %%cr4, %0": "=r" (cr4));

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs,fsindex,gs,gsindex,shadowgs);
        printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
void show_regs(struct pt_regs *regs)
{
        printk("CPU %d:", smp_processor_id());
        __show_regs(regs);
        show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(me);

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}
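
/*
 * flush_thread() runs on exec: drop the debug registers and TLS entries
 * of the old image and, if an ABI switch is pending, flip TIF_IA32 so
 * the new image runs with the right personality.
 */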
void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(tsk);

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}
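
/*
 * release_thread() is called when the task is finally reaped; a dead
 * process should not own an LDT any more at this point, so complain
 * loudly if it still does.
 */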
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}
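
/*
 * Helpers for 32bit TLS bases: set_32bit_tls() installs a base below 4GB
 * as a GDT TLS descriptor, read_32bit_tls() reassembles such a base from
 * the descriptor's split base fields.
 */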
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}
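
/*
 * copy_thread() sets up the new task's kernel stack and thread_struct:
 * the child starts from a copy of the parent's pt_regs, inherits the
 * segment state and IO permission bitmap, and optionally gets a new TLS
 * base when CLONE_SETTLS is passed.
 */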
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

        *childregs = *regs;

        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL)
                childregs->rsp = (unsigned long)childregs;

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_ti_thread_flag(p->thread_info, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        unlazy_fpu(prev_p);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /* check if the user used a selector != 0
                         * if yes clear 64bit base, since overloaded base
                         * is always mapped to the Null selector
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr)
                        /*
                         * Copy the relevant range of the IO bitmap.
                         * Normally this is 128 bytes or less:
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                                max(prev->io_bitmap_max, next->io_bitmap_max));
                else {
                        /*
                         * Clear any possible leftover bits:
                         */
                        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
                }
        }

        return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}
void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                    NULL, NULL);
}
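
/*
 * get_wchan() walks the sleeping task's saved frame pointers (at most 16
 * frames) and returns the first return address that is not inside the
 * scheduler, i.e. the place the task is blocked in.
 */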
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp,rip;
        int count = 0;

        if (!p || p == current || p->state==TASK_RUNNING)
                return 0;
        stack = (unsigned long)p->thread_info;
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
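
/*
 * arch_prctl(ARCH_SET_FS/ARCH_SET_GS/ARCH_GET_FS/ARCH_GET_GS): bases below
 * 4GB are installed as GDT TLS descriptors because reloading a segment
 * selector is cheaper on context switch; larger bases go straight into
 * MSR_FS_BASE / MSR_KERNEL_GS_BASE.
 */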
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = (struct pt_regs *)(tsk->thread.rsp0);
        --pp;

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}
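
/*
 * arch_align_stack() adds up to 8KB of random slack below the initial
 * user stack pointer (when VA-space randomization is enabled) and keeps
 * the result 16-byte aligned.
 */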
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}