/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a, b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
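
/*
 * Illustrative sketch (not from the original header): the macro simply
 * yields the address of the instruction following the asm, so a
 * debugging call site could, for example, do:
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "currently executing near %p\n", pc);
 */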

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;		/* Physical processor id. */
	__u8 cpu_core_id;		/* Core id */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
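
/*
 * Illustrative sketch (not from the original header): vendor and cache
 * fields of cpuinfo_x86 are typically consulted through boot_cpu_data
 * or current_cpu_data, e.g.:
 *
 *	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *	    boot_cpu_data.x86 == 6)
 *		printk(KERN_INFO "AMD K7-class CPU, %d KB cache\n",
 *		       boot_cpu_data.x86_cache_size);
 */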

extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */

static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
					 unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	write_cr4(read_cr4() | mask);
}

static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	write_cr4(read_cr4() & ~mask);
}
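
/*
 * Illustrative sketch (not from the original header): CPU setup code
 * records a CR4 feature both in hardware and in mmu_cr4_features via
 * these helpers, e.g. when enabling FXSAVE/FXRSTOR and SSE exceptions:
 *
 *	set_in_cr4(X86_CR4_OSFXSR);
 *	set_in_cr4(X86_CR4_OSXMMEXCPT);
 */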

/*
 *      NSC/Cyrix CPU configuration register indexes
 */

#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 *      NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
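
/*
 * Illustrative sketch (not from the original header): Cyrix/NSC setup
 * code reads and writes these indexed registers through the macros
 * above, for example reading the device ID and masking a CCR2 bit
 * (0x88 here is purely an example value):
 *
 *	unsigned char dir0 = getCx86(CX86_DIR0);
 *	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
 */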

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
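
/*
 * Illustrative sketch (not from the original header): an idle loop can
 * arm MONITOR on a flag word and then MWAIT until it is written, along
 * the lines of:
 *
 *	while (!need_resched()) {
 *		__monitor(&current_thread_info()->flags, 0, 0);
 *		if (need_resched())
 *			break;
 *		__mwait(0, 0);
 *	}
 */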

/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

struct i387_fsave_struct {
	long	cwd, swd, twd;
	long	fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short	cwd, swd, twd, fop;
	long	fip, fcs, foo, fos;
	long	mxcsr, mxcsr_mask;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	cwd, swd, twd;
	long	fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

struct thread_struct;

struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned long	esp1;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	esp2;
	unsigned short	ss2,__ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_esp0;
	unsigned int		saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
	unsigned long	iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};

#define INIT_THREAD { \
	.vm86_info = NULL, \
	.sysenter_cs = __KERNEL_CS, \
	.io_bitmap_ptr = NULL, \
	.fs = __KERNEL_PDA, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS { \
	.esp0 = sizeof(init_stack) + (long)&init_stack, \
	.ss0 = __KERNEL_DS, \
	.ss1 = __KERNEL_CS, \
	.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
	.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}

#define start_thread(regs, new_eip, new_esp) do { \
	__asm__("movl %0,%%gs": :"r" (0)); \
	regs->xfs = 0; \
	set_fs(USER_DS); \
	regs->xds = __USER_DS; \
	regs->xes = __USER_DS; \
	regs->xss = __USER_DS; \
	regs->xcs = __USER_CS; \
	regs->eip = new_eip; \
	regs->esp = new_esp; \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
	unsigned long *__ptr = (unsigned long *)(info); \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({ \
	struct pt_regs *__regs__; \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1; \
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
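
/*
 * Illustrative sketch (not from the original header): a busy-wait loop
 * would typically insert cpu_relax() while spinning, e.g. with a
 * hypothetical "volatile int *flag":
 *
 *	while (!*flag)
 *		cpu_relax();
 */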

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register) \
	__asm__("movl %%db" #register ", %0" \
		:"=r" (var))
#define set_debugreg(value, register) \
	__asm__("movl %0,%%db" #register \
		: /* no output */ \
		:"r" (value))
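
/*
 * Illustrative sketch (not from the original header): ptrace-style code
 * could read DR7 and arm breakpoint register DR0 this way, where "addr"
 * is a hypothetical linear address to watch:
 *
 *	unsigned long dr7;
 *	get_debugreg(dr7, 7);
 *	set_debugreg(addr, 0);
 */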

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static fastcall inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
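
/*
 * Illustrative sketch (not from the original header): callers usually
 * combine these helpers to probe a CPU, e.g. reading the maximum basic
 * CPUID leaf and the feature words of leaf 1 (bit 25 of EDX is SSE):
 *
 *	unsigned int max_leaf = cpuid_eax(0);
 *	unsigned int feat_edx = cpuid_edx(1);
 *
 *	if (feat_edx & (1 << 25))
 *		;	// CPU advertises SSE
 */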

/* generic versions from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7

#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops: uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we don't currently do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
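
/*
 * Illustrative sketch (not from the original header): prefetch() is
 * typically issued one element ahead while walking a linked structure,
 * e.g. with a hypothetical list node type and process() helper:
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 */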

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern int init_gdt(int cpu, struct task_struct *idle);
extern void cpu_set_gdt(int);
extern void secondary_cpu_init(void);

#endif /* __ASM_I386_PROCESSOR_H */