/*
 * linux/arch/x86-64/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 *
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/proto.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	atomic_inc(&hlt_counter);
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	atomic_dec(&hlt_counter);
}
EXPORT_SYMBOL(enable_hlt);
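
/*
 * Usage sketch (assumption, not taken from this file): code that must keep
 * the CPU from executing HLT in the idle loop pairs the two helpers above:
 *
 *	disable_hlt();
 *	... section that relies on the CPU never halting ...
 *	enable_hlt();
 *
 * default_idle() below only uses the HLT path while hlt_counter is zero.
 */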
/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
	if (!atomic_read(&hlt_counter)) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		while (!need_resched()) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
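
/*
 * Illustrative sketch (not part of the original file): the inline asm loop
 * in poll_idle() is roughly equivalent to the following C -- spin on the
 * need_resched flag with interrupts enabled, using a CPU-friendly pause:
 *
 *	local_irq_enable();
 *	while (!need_resched())
 *		cpu_relax();		-- compiles to "rep; nop"
 */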
void cpu_idle_wait(void)
	unsigned int cpu, this_cpu = get_cpu();

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));

	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;

	__get_cpu_var(cpu_idle_state) = 0;

	for_each_online_cpu(cpu) {
		if (cpu_isset(cpu, map) &&
		    !per_cpu(cpu_idle_state, cpu))

		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
EXPORT_SYMBOL_GPL(cpu_idle_wait);

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
	__get_cpu_var(cpu_state) = CPU_DEAD;

#else
static inline void play_dead(void)
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (!need_resched()) {
		if (__get_cpu_var(cpu_idle_state))
			__get_cpu_var(cpu_idle_state) = 0;

		if (cpu_is_offline(smp_processor_id()))

	preempt_enable_no_resched();

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
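
/*
 * Illustrative sketch (assumption, not from this file): the usual
 * MONITOR/MWAIT idle pattern arms the monitor on the thread flags and only
 * sleeps if no reschedule request arrived in between:
 *
 *	while (!need_resched()) {
 *		__monitor(&current_thread_info()->flags, 0, 0);
 *		smp_mb();
 *		if (!need_resched())
 *			__mwait(0, 0);
 *	}
 */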
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait.
		 */
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;

static int __init idle_setup(char *str)
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");

	boot_option_idle_override = 1;

__setup("idle=", idle_setup);
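
/*
 * Usage note: booting with "idle=poll" on the kernel command line runs
 * idle_setup() above, which selects the polling idle loop and sets
 * boot_option_idle_override so select_idle_routine() will not override the
 * choice with mwait_idle() later.
 */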
/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs)
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
		regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
		regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
		regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
		regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
		regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
		fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

void show_regs(struct pt_regs *regs)
	printk("CPU %d:", smp_processor_id());
	show_trace(&regs->rsp);
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe
	 * for this function; it will be disabled by kprobe_flush_task if
	 * you do.
	 */
	kprobe_flush_task(me);

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
void flush_thread(void)
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */

void release_thread(struct task_struct *dead_task)
	if (dead_task->mm->context.size) {
		printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
			dead_task->mm->context.ldt,
			dead_task->mm->context.size);

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
	struct user_desc ud = {
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
	struct desc_struct *desc = (void *)t->thread.tls_array;
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

	childregs->rsp = rsp;
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_ti_thread_flag(p->thread_info, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
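
/*
 * For example, given a struct thread_struct *next, loaddebug(next, 7)
 * expands to set_debug(next->debugreg7, 7), i.e. it reloads %db7 from the
 * saved per-thread value.
 */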
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
		/* when next process has a 64bit base use it */
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;

		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;

	/*
	 * Switch the PDA context.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack,
		(unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
				max(prev->io_bitmap_max, next->io_bitmap_max));
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
/*
 * sys_execve() executes a new program.
 */
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
	error = do_execve(filename, argv, envp, &regs);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
void set_personality_64bit(void)
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
unsigned long get_wchan(struct task_struct *p)
	if (!p || p == current || p->state == TASK_RUNNING)
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
	fp = *(u64 *)(p->thread.rsp);
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
	} while (count++ < 16);
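
/*
 * Gloss on the walk above (not original commentary): the code treats the
 * word at p->thread.rsp as a saved frame pointer and the word 8 bytes above
 * each frame pointer as a return address; it follows at most 16 frames and
 * reports the first return address outside the scheduler as the task's
 * wait channel.
 */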
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
	int doit = task == current;

		if (addr >= TASK_SIZE_OF(task))
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gsindex = 0;
			task->thread.gs = addr;
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);

		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			/* set the selector to 0 to not confuse
			   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
			ret = checking_wrmsrl(MSR_FS_BASE, addr);

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
			rdmsrl(MSR_FS_BASE, base);
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
			rdmsrl(MSR_KERNEL_GS_BASE, base);
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
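
/*
 * Illustrative user-space sketch (assumption, not part of this file):
 * a 64-bit process typically reaches do_arch_prctl() through the raw
 * syscall, since there is no dedicated libc wrapper:
 *
 *	#include <asm/prctl.h>		ARCH_SET_FS / ARCH_GET_FS
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int set_fs_base(unsigned long base)
 *	{
 *		return syscall(SYS_arch_prctl, ARCH_SET_FS, base);
 *	}
 *
 *	static unsigned long get_fs_base(void)
 *	{
 *		unsigned long base;
 *		syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
 *		return base;
 *	}
 */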
/*
 * Capture the user space registers if the task is not running (in user space).
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);

	elf_core_copy_regs(regs, &ptregs);

unsigned long arch_align_stack(unsigned long sp)
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
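
/*
 * Note (gloss on the fragment above): when the randomize_va_space sysctl is
 * enabled, the initial user stack pointer is lowered by a random amount of
 * up to 8KB, so each process starts with a slightly different stack layout.
 */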