/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			rmb();
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			pm_idle();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->sp,
		regs->flags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs	= __USER_CS;
	regs->ss	= __USER_DS;
	regs->flags	= X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
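
/*
 * Illustrative note (not part of the original file): the caller of
 * start_thread() is the binary format loader; fs/binfmt_elf.c, for
 * instance, does roughly
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * so new_ip is the ELF entry point and new_sp the freshly built
 * argv/envp stack, while the segments and flags are forced to the
 * fixed 64-bit user-mode values above.
 */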
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
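
/*
 * Illustrative sketch (userspace, not part of this file): the TSC mode
 * maintained here is driven through prctl(2), e.g. with <sys/prctl.h>:
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);  rdtsc now faults
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);   rdtsc works again
 *
 * With PR_TSC_SIGSEGV in effect, CR4.TSD is set, so a rdtsc executed at
 * CPL 3 traps and the process receives SIGSEGV.
 */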
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
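
/*
 * For illustration: the ## operator pastes the register number into the
 * field name, so loaddebug(next, 7) expands to
 *
 *	set_debugreg(next->debugreg7, 7);
 *
 * which lets the TIF_DEBUG path below reload DR0-DR3, DR6 and DR7 with
 * one macro instead of six near-identical statements.
 */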
static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		update_debugctlmsr(0);
		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
	}

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif
}
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * A segment register != 0 always requires a reload. Also
	 * reload when it has changed. When the prev process used a
	 * 64bit base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear the 64bit base, since an overloaded base
		 * is always mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
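
/*
 * Illustration (assumes the frame-pointer layout of a kernel built
 * without -fomit-frame-pointer): each iteration above treats fp as
 *
 *	fp     -> saved caller %rbp (next frame to visit)
 *	fp + 8 -> return address   (candidate wchan value)
 *
 * and stops at the first return address outside in_sched_functions(),
 * i.e. the place the task actually blocked.
 */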
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
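
/*
 * Illustrative sketch (userspace, not part of this file): the typical
 * consumer is a threading library establishing TLS. Assuming
 * <asm/prctl.h> and <sys/syscall.h>:
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * after which base equals (unsigned long)tls_block. Note the asymmetry
 * in do_arch_prctl() above: for ARCH_SET_* addr is the new base itself,
 * for ARCH_GET_* it is a user pointer the base is written through.
 */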
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
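
/*
 * Worked example: if get_random_int() returns 5000, the stack top is
 * moved down by 5000 % 8192 = 5000 bytes and then rounded down to a
 * 16-byte boundary by the & ~0xf above, as the x86-64 ABI requires.
 * The offset is always under 8k, so the usable stack barely shrinks.
 */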
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}