/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
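
/*
 * Example (illustrative only, not part of this file): a driver can hook
 * the idle notifier chain roughly like this, assuming it only cares
 * about the IDLE_START/IDLE_END events delivered above:
 *
 *	static int my_idle_event(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			;	// CPU is entering idle
 *		else if (action == IDLE_END)
 *			;	// CPU is leaving idle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_event,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */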

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	/* mask all interrupts, flush any and all caches, and halt */
	wbinvd_halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
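
/*
 * Note: TS_POLLING (set at the top of cpu_idle()) tells the scheduler's
 * wakeup path that this CPU polls need_resched() in its idle loop, so
 * it can often be woken without sending a reschedule IPI.
 */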

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs, 1);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
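
/*
 * These two helpers back the "small base" fast path in do_arch_prctl()
 * below: an FS/GS base that fits in 32 bits is kept in a GDT TLS slot
 * and loaded with a plain segment selector on context switch, instead
 * of going through the slower MSR_FS_BASE/MSR_KERNEL_GS_BASE writes.
 */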

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
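
/*
 * For reference (illustrative userspace sketch, not part of this file):
 * in the 64bit clone ABI the CLONE_SETTLS argument arrives in %r8,
 * which is why copy_thread() reads childregs->r8 above. Through the
 * glibc clone(3) wrapper that argument is the 'tls' parameter:
 *
 *	// hypothetical caller; child_fn, child_stack, arg and tls_block
 *	// are assumed to be set up elsewhere
 *	clone(child_fn, child_stack,
 *	      CLONE_VM | CLONE_SETTLS | SIGCHLD, arg,
 *	      NULL, tls_block, NULL);
 */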

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* X86_EFLAGS_IF: start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
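
/*
 * Example (illustrative userspace sketch, not part of this file):
 * a sandbox can ask for rdtsc to fault with
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *
 * after which a rdtsc instruction raises SIGSEGV, because
 * __switch_to_xtra() below keeps CR4.TSD set while this task runs.
 */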

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
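
/*
 * E.g. loaddebug(next, 7) pastes the register number into the field
 * name and expands to set_debugreg(next->debugreg7, 7), writing the
 * saved shadow value back into %db7.
 */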

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
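
/*
 * Frame layout assumed by the walk above (standard x86-64 frame
 * pointers): at each step *fp is the saved caller %rbp and *(fp+8) is
 * the return address, i.e.
 *
 *	fp      -> [saved rbp]	-- next frame
 *	fp + 8  -> [return rip]	-- candidate wchan
 *
 * The walk stops at the first return address outside the scheduler.
 */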

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
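
/*
 * Example (illustrative userspace sketch, not part of this file):
 * threading libraries set up their TLS base through this syscall, e.g.
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * Note the asymmetry visible in do_arch_prctl() above: addr is the new
 * base value for the SET codes but an output pointer for the GET codes.
 */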

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
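
/*
 * The randomization above shifts the initial stack down by up to 8KB;
 * the final '& ~0xf' re-aligns the result to 16 bytes as the x86-64
 * ABI requires.
 */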

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
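
/*
 * 0x02000000 gives a 32MB window above the current brk in which the
 * heap start is randomized; randomize_range() returns 0 when the range
 * is unusable, in which case the "?:" falls back to the unrandomized
 * brk.
 */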