/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>

asmlinkage extern void ret_from_fork(void);

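/*
 * Default clone flags for kernel_thread(): kernel threads share the
 * kernel's address space (CLONE_VM) and are never ptraced (CLONE_UNTRACED).
 */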
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}
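
/*
 * flush_thread() runs on exec.  If the loader flagged an ABI switch
 * (TIF_ABI_PENDING, set e.g. by the binfmt code), flip the task between
 * 64-bit and IA-32 compat mode, then clear the debug registers, TLS slots
 * and FPU state inherited from the old program image.
 */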
void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
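
/*
 * Helpers for the 32-bit TLS slots in the GDT: set_32bit_tls() installs a
 * flat 4GB, 32-bit descriptor with the given base; read_32bit_tls()
 * returns the base currently programmed into a TLS slot.
 */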
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
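
/*
 * copy_thread() builds the child's kernel stack: a pt_regs frame is placed
 * at the top of the stack, copied from the parent, with ax cleared so the
 * child sees a return value of 0 from fork()/clone().
 */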
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
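
/*
 * start_thread() is called by the binfmt loaders to set up the user-mode
 * register state of a freshly exec'd 64-bit program: user code and stack
 * selectors, the new instruction and stack pointers, and a clean FPU state.
 */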
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = 0;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
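
/*
 * Userspace reaches get_tsc_mode()/set_tsc_mode() through prctl().  A
 * minimal, illustrative (untested) example that makes RDTSC fault for the
 * calling task and then restores the previous mode:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int tsc_mode;
 *	prctl(PR_GET_TSC, &tsc_mode, 0, 0, 0);
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);  // RDTSC now raises SIGSEGV
 *	prctl(PR_SET_TSC, tsc_mode, 0, 0, 0);        // restore previous mode
 */
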
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		    NULL, NULL);
}
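
/*
 * get_wchan() reports where a sleeping task is blocked: it walks the
 * task's saved frame-pointer chain (the return address lives at fp+8)
 * and returns the first instruction pointer outside the scheduler,
 * giving up after 16 frames.
 */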
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
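
/*
 * do_arch_prctl() sets or reads the FS/GS base of a task.  Bases that fit
 * in 32 bits are installed as a TLS descriptor in the GDT (a descriptor
 * base is only 32 bits wide), which makes switching cheaper; larger bases
 * have to go through the FS_BASE/KERNEL_GS_BASE MSRs.
 */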
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
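
/*
 * From userspace, sys_arch_prctl() is reached via the arch_prctl(2)
 * syscall.  An illustrative (untested) example that moves the calling
 * thread's FS base, e.g. to point at a new TLS block:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long tls_block[64];
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *
 * ARCH_GET_FS/ARCH_GET_GS instead take a pointer into which the current
 * base is written.
 */
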
/*
 * Randomize the initial user stack pointer by up to 8 KiB and keep it
 * 16-byte aligned, unless the task disabled VA randomization.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
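
/*
 * Place the heap start at a random offset of up to 32 MiB (0x02000000
 * bytes) above the brk chosen by the ELF loader.
 */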
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}