/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */
#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H
#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/processor-flags.h>
/* flag for disabling the tsc */
extern int tsc_disable;
struct desc_struct {
	unsigned long a,b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
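
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might use current_text_addr() for ad-hoc debugging.  The function
 * and the printk() call are hypothetical.
 */
#if 0
static void example_show_pc(void)
{
	void *pc = current_text_addr();	/* address just past the mov */

	printk(KERN_DEBUG "executing near %p\n", pc);
}
#endif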
/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;		/* Physical processor id. */
	__u8 cpu_core_id;		/* Core id */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
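
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the cpu_data accessors, e.g. branching on the boot CPU's vendor.  The
 * function itself is hypothetical.
 */
#if 0
static int example_is_amd(void)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
}
#endif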
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
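
/*
 * Illustrative sketch (not part of the original header): fetching the
 * CPUID vendor string with native_cpuid().  Leaf 0 returns the vendor
 * id in EBX:EDX:ECX; the helper below is hypothetical.
 */
#if 0
static void example_read_vendor(char vendor[13])
{
	unsigned int eax = 0, ebx, ecx, edx;	/* leaf 0: vendor id */

	native_cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';			/* e.g. "GenuineIntel" */
}
#endif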
#define load_cr3(pgdir) write_cr3(__pa(pgdir))
/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
static inline void set_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}
static inline void clear_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
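
/*
 * Illustrative sketch (not part of the original header): how boot code
 * might globally enable a CR4 feature so that CPUs booting later pick
 * it up from mmu_cr4_features.  The feature choice is only an example.
 */
#if 0
static void example_enable_global_pages(void)
{
	if (cpu_has_pge)		/* feature test is an assumption */
		set_in_cr4(X86_CR4_PGE);
}
#endif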
/*
 *	NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
/*
 *	NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
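
/*
 * Illustrative sketch (not part of the original header): the usual
 * read-modify-write pattern on a Cyrix configuration register.  The
 * MAPEN bit value (0x10 in CCR3) follows the Cyrix datasheets; treat
 * the specifics here as an assumption.
 */
#if 0
static void example_cyrix_mapen(void)
{
	u8 ccr3 = getCx86(CX86_CCR3);		  /* save old CCR3 */

	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable config reg access */
	/* ... touch other CX86_* registers here ... */
	setCx86(CX86_CCR3, ccr3);		  /* restore CCR3 */
}
#endif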
/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
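
/*
 * Illustrative sketch (not part of the original header): the canonical
 * MONITOR/MWAIT wait loop.  The CPU is armed to watch the cache line
 * containing the flag, then MWAIT sleeps until that line is written
 * (or an interrupt arrives).  The flag variable is hypothetical.
 */
#if 0
static void example_mwait_on(volatile int *flag)
{
	while (!*flag) {
		__monitor((const void *)flag, 0, 0); /* arm address monitor */
		if (!*flag)		/* re-check to avoid a lost wakeup */
			__mwait(0, 0);	/* C1 state, no extensions */
	}
}
#endif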
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
/* from system description table in BIOS.  Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;
/* Boot loader type from the setup header */
extern int bootloader_type;
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
#define HAVE_ARCH_PICK_MMAP_LAYOUT
/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
struct i387_fsave_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};
struct i387_fxsave_struct {
	unsigned short	cwd;
	unsigned short	swd;
	unsigned short	twd;
	unsigned short	fop;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	mxcsr;
	long	mxcsr_mask;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));
struct i387_soft_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};
union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct;

struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned long	esp1;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	esp2;
	unsigned short	ss2,__ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));
#define ARCH_MIN_TASKALIGN	16
struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_esp0;
	unsigned int		saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
	unsigned long	iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};
#define INIT_THREAD  {							\
	.esp0 = sizeof(init_stack) + (long)&init_stack,			\
	.vm86_info = NULL,						\
	.sysenter_cs = __KERNEL_CS,					\
	.io_bitmap_ptr = NULL,						\
	.fs = __KERNEL_PDA,						\
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							\
	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
	.ss0		= __KERNEL_DS,					\
	.ss1		= __KERNEL_CS,					\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
}
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%gs": :"r" (0));			\
	regs->xfs = 0;						\
	set_fs(USER_DS);					\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
} while (0)
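
/*
 * Illustrative sketch (not part of the original header): a binfmt
 * loader is the typical caller of start_thread(), pointing the saved
 * registers at the freshly mapped user image.  Names are hypothetical.
 */
#if 0
static void example_finish_exec(struct pt_regs *regs,
				unsigned long entry, unsigned long ustack)
{
	start_thread(regs, entry, ustack);	/* user CS/DS, EIP, ESP */
}
#endif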
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})
#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
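
/*
 * Illustrative sketch (not part of the original header): reporting
 * where a (stopped) task was executing in user space, in the style of
 * the /proc code.  The function name is hypothetical.
 */
#if 0
static void example_report_task(struct task_struct *p)
{
	printk(KERN_DEBUG "%s: eip=%08lx esp=%08lx\n",
	       p->comm, KSTK_EIP(p), KSTK_ESP(p));
}
#endif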
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};
typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
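
/*
 * Illustrative sketch (not part of the original header): a polling
 * loop that uses cpu_relax() to be friendly to the sibling hyperthread
 * and to save power while spinning.  The flag variable is hypothetical.
 */
#if 0
static void example_spin_until(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* emits PAUSE (REP;NOP) */
}
#endif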
static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("movl %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("movl %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("movl %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("movl %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("movl %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("movl %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}
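
/*
 * Illustrative sketch (not part of the original header): how
 * sys_iopl-style code raises the I/O privilege level.  IOPL lives in
 * EFLAGS bits 12-13, so a level is shifted into place before being
 * applied.  The wrapper function is hypothetical.
 */
#if 0
static void example_raise_iopl(unsigned int level)	/* 0..3 */
{
	native_set_iopl_mask(level << 12);	/* position in X86_EFLAGS_IOPL */
}
#endif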
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	native_load_esp0(tss, thread);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
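
/*
 * Illustrative sketch (not part of the original header): probing a
 * feature bit with the single-datum helpers.  Leaf 1 EDX bit 25 is SSE
 * per the Intel manuals; treat the constant as an assumption here.
 */
#if 0
static int example_has_sse(void)
{
	return (cpuid_edx(1) >> 25) & 1;	/* leaf 1, EDX[25] = SSE */
}
#endif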
/* generic versions from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
/* K7 nops */
/* uses %eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we don't currently do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
/* 3dnow! prefetch to get an exclusive cache line.  Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
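
/*
 * Illustrative sketch (not part of the original header): issuing a
 * prefetch one element ahead while walking a singly linked list, so
 * the next node is (hopefully) in cache by the time it is needed.
 * The node type is hypothetical.
 */
#if 0
struct example_node { struct example_node *next; int payload; };

static int example_sum(struct example_node *n)
{
	int sum = 0;

	for (; n; n = n->next) {
		prefetch(n->next);	/* overlap fetch with this node's work */
		sum += n->payload;
	}
	return sum;
}
#endif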
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void cpu_set_gdt(int);
extern void cpu_init(void);
#endif /* __ASM_I386_PROCESSOR_H */