/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 *
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/ds.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
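/*
 * The idle notifier lets other kernel code hook CPU idle entry and exit.
 * A purely illustrative user (not part of this file) might look like:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 *
 * The chain is atomic and enter_idle() runs with interrupts disabled, so
 * callbacks must not sleep.
 */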
/* Without CPU hotplug a running CPU never goes offline, so just BUG() */
static inline void play_dead(void)
{
	BUG();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs, 1);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}

	ds_exit_thread(current);
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
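/*
 * FS/GS bases that fit in 32 bits can be expressed as an ordinary GDT
 * segment base, so do_arch_prctl() further down installs them into one of
 * the thread's TLS slots (FS_TLS/GS_TLS) with set_32bit_tls() and simply
 * reloads the selector.  Bases above 4GB cannot be encoded in a descriptor
 * and have to go through MSR_FS_BASE/MSR_KERNEL_GS_BASE instead.
 */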
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}

	ds_copy_thread(p, me);

	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
	p->thread.debugctlmsr = 0;

	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
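/*
 * start_thread() is called by the binary format loaders (e.g. the ELF
 * loader) after a successful execve(): it resets the data segment
 * registers, points cs/ss at the 64-bit user segments and loads the new
 * instruction and stack pointers before the task returns to user mode.
 */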
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs		= __USER_CS;
	regs->ss		= __USER_DS;
	regs->flags		= 0x200;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
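/*
 * Per-thread control of the RDTSC instruction: setting CR4.TSD makes
 * RDTSC privileged, so while a restricted task is running a user-mode
 * RDTSC raises #GP, which the task sees as SIGSEGV.
 */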
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
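/*
 * get_tsc_mode()/set_tsc_mode() back the generic prctl() interface.  An
 * illustrative (not from this file) userspace sequence that makes RDTSC
 * fault with SIGSEGV for the current task:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *
 * disable_TSC() flips CR4.TSD immediately for the running task, and
 * __switch_to_xtra() below keeps the bit in sync across context switches.
 */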
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
		ds_switch_to(prev_p, next_p);
	else if (next->debugctlmsr != prev->debugctlmsr)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}
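/*
 * Note: __switch_to_xtra() is the slow path of the context switch.  It is
 * only invoked from __switch_to() below when either the previous or the
 * next task has one of the _TIF_WORK_CTXSW flags set (debug registers,
 * I/O bitmap, TSC restriction or DS area in use).
 */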
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}
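/*
 * The task_struct returned above is the task we just switched away from;
 * the switch_to() macro hands it back to the scheduler as "last" so the
 * generic code can finish cleaning up after the previous task.
 */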
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}
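/*
 * get_wchan() reports where a sleeping task is blocked.  The walk below
 * relies on frame pointers: each frame stores the caller's frame pointer
 * at fp and the return address at fp + 8, so following at most 16 frames
 * is enough to step out of the scheduler functions and return the first
 * non-scheduler return address.
 */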
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
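/*
 * Userspace reaches do_arch_prctl() through the arch_prctl(2) system call.
 * A minimal, purely illustrative 64-bit example (not part of this file)
 * that moves the FS base, e.g. for a custom TLS implementation:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int set_fs_base(unsigned long base)
 *	{
 *		return syscall(SYS_arch_prctl, ARCH_SET_FS, base);
 *	}
 *
 * Bases at or below 4GB are installed as a GDT TLS entry; larger bases
 * take the checking_wrmsrl() path, as handled above.
 */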
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
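/*
 * Taken together, the two helpers above randomize process layout at exec
 * time: arch_align_stack() applies up to 8KB of downward jitter to the
 * initial user stack (then aligns it to 16 bytes), and
 * arch_randomize_brk() places the heap start somewhere in the 32MB
 * (0x02000000 byte) window above mm->brk.
 */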