/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 *
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>

asmlinkage extern void ret_from_fork(void);

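/*
 * Default clone flags for kernel threads: CLONE_VM (no separate address
 * space) and CLONE_UNTRACED (kernel threads cannot be ptraced).
 */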
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

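/*
 * Idle notifiers: other code can register on this chain to be called when a
 * CPU enters (IDLE_START) or leaves (IDLE_END) the idle loop below.
 */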
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

static inline void play_dead(void)
{
	BUG();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
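	/*
	 * TS_POLLING tells the scheduler that this CPU polls need_resched()
	 * while idle, so a remote wakeup does not need to send a resched IPI.
	 */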
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs, 1);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

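/*
 * Helpers for 32-bit (sub-4GB) FS/GS bases: the base is packed into one of
 * the GDT TLS descriptor slots, so it can later be reloaded with a cheap
 * segment register load instead of a WRMSR (see do_arch_prctl() below).
 */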
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

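/*
 * start_thread() is called by the binfmt loaders at execve() time: it loads
 * the new program's entry point and stack pointer into the register frame
 * and resets the segment registers to the default user values.
 */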
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

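/*
 * prctl(PR_SET_TSC) support: setting CR4.TSD makes RDTSC privileged, so a
 * user-space RDTSC in this task faults and is reported as SIGSEGV.
 */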
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = 0;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

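/*
 * set_tsc_mode() is the write side of the same prctl: PR_TSC_SIGSEGV turns
 * RDTSC off for this task, PR_TSC_ENABLE turns it back on, anything else
 * is rejected.
 */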
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

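/*
 * Slow path of the context switch: only called from __switch_to() when the
 * outgoing or incoming task has a _TIF_WORK_CTXSW flag set, i.e. debug
 * registers, TSC mode, the I/O bitmap or BTS tracing need attention.
 */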
static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

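/*
 * get_wchan() walks at most 16 saved frame pointers on the sleeping task's
 * kernel stack and returns the first return address outside the scheduler,
 * i.e. the place where the task went to sleep.
 */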
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

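/*
 * arch_align_stack() offsets the initial user stack pointer by up to 8KB of
 * randomness (unless randomization is disabled) and rounds it down to the
 * 16-byte alignment the ABI requires.
 */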
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}