1 #include <linux/kernel.h>
2 #include <linux/init.h>
3 #include <linux/bitops.h>
4 #include <asm/processor.h>
10 #ifdef CONFIG_X86_OOSTORE
/*
 * power2 - presumably returns a power of two derived from x (used as
 * "root = power2(mem)" and "high = power2(mem-top)" below, and MCR
 * sizes must be powers of two).  NOTE(review): the body is not
 * visible in this chunk — confirm round-up vs round-down semantics.
 */
12 static u32 __cpuinit power2(u32 x)
22 * Set up an actual MCR
/*
 * centaur_mcr_insert - program one Memory Configuration Register.
 * Builds the low word from the inverted size mask (size must be a
 * power of two) plus the attribute bits in `key`, writes it to
 * MSR_IDT_MCR0+reg, and reports the mapping to the MTRR driver.
 * NOTE(review): the lo/hi declarations and the code folding `base`
 * into the register value are in lines not visible in this chunk.
 */
25 static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
30 lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
31 lo &= ~0xFFF; /* Remove the ctrl value bits */
32 lo |= key; /* Attribute we wish to set */
33 wrmsr(reg+MSR_IDT_MCR0, lo, hi);
34 mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
38 * Figure what we can cover with MCR's
40 * Shortcut: We know you can't put 4Gig of RAM on a winchip
/*
 * ramtop - scan the e820 map for the top of RAM we may cover with
 * MCRs.  Entries above 4GiB are skipped (a WinChip cannot address
 * them); the first E820_RESERVED entry at/above 1MiB lowers `clip`
 * so we never MCR over reserved space — a fail-safe that may leave
 * some RAM uncovered (acceptable per the comment below).
 */
43 static u32 __cpuinit ramtop(void) /* 16388 */
47 u32 clip = 0xFFFFFFFFUL;
49 for (i = 0; i < e820.nr_map; i++) {
50 unsigned long start, end;
/* Ignore entries wholly above the 32-bit address space. */
52 if (e820.map[i].addr > 0xFFFFFFFFUL)
55 * Don't MCR over reserved space. Ignore the ISA hole
56 * we frob around that catastrophy already
59 if (e820.map[i].type == E820_RESERVED)
61 if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
62 clip = e820.map[i].addr;
65 start = e820.map[i].addr;
66 end = e820.map[i].addr + e820.map[i].size;
72 /* Everything below 'top' should be RAM except for the ISA hole.
73 Because of the limited MCR's we want to map NV/ACPI into our
74 MCR range for gunk in RAM
76 Clip might cause us to MCR insufficient RAM but that is an
77 acceptable failure mode and should only bite obscure boxes with
80 The second case Clip sometimes kicks in is when the EBDA is marked
81 as reserved. Again we fail safe with reasonable results
91 * Compute a set of MCR's to give maximum coverage
/*
 * centaur_mcr_compute - greedily build up to `nr` MCR mappings, each
 * carrying attribute `key`, to cover as much RAM as possible.  Each
 * iteration installs the largest power-of-two block that fits going
 * up from `top`, down from `base`, or below the current `floor` —
 * but never below 1MiB, because of the ISA hole.  Callers treat the
 * return value as the number of MCRs consumed.
 * NOTE(review): loop structure and several branch bodies fall in
 * lines not visible in this chunk.
 */
94 static int __cpuinit centaur_mcr_compute(int nr, int key)
97 u32 root = power2(mem);
108 * Find the largest block we will fill going upwards
111 u32 high = power2(mem-top);
114 * Find the largest block we will fill going downwards
120 * Don't fill below 1Mb going downwards as there
121 * is an ISA hole in the way.
124 if(base <= 1024*1024)
128 * See how much space we could cover by filling below
134 else if(floor ==512*1024)
137 /* And forget ROM space */
140 * Now install the largest coverage we get
143 if(fspace > high && fspace > low)
145 centaur_mcr_insert(ct, floor, fspace, key);
150 centaur_mcr_insert(ct, top, high, key);
156 centaur_mcr_insert(ct, base, low, key);
162 * We loaded ct values. We now need to set the mask. The caller
/*
 * centaur_create_optimal_mcr - WinChip C6 flavour: map RAM with up
 * to 6 MCRs of type 31 (full write combining, weakly ordered
 * writes), then zero the MCRs left unused by centaur_mcr_compute().
 */
169 static void __cpuinit centaur_create_optimal_mcr(void)
173 * Allocate up to 6 mcrs to mark as much of ram as possible
174 * as write combining and weak write ordered.
176 * To experiment with: Linux never uses stack operations for
177 * mmio spaces so we could globally enable stack operation wc
179 * Load the registers with type 31 - full write combining, all
180 * writes weakly ordered.
182 int used = centaur_mcr_compute(6, 31);
/* Clear the remaining MCRs (loop header not visible in this chunk —
   presumably i runs from `used` to the last register). */
189 wrmsr(MSR_IDT_MCR0+i, 0, 0);
/*
 * winchip2_create_optimal_mcr - WinChip 2 flavour of the above: uses
 * type 25 (write combining + weak read and write ordering), flags
 * the in-use MCRs in MSR_IDT_MCR_CTRL, and zeroes the unused ones.
 */
192 static void __cpuinit winchip2_create_optimal_mcr(void)
198 * Allocate up to 6 mcrs to mark as much of ram as possible
199 * as write combining, weak store ordered.
201 * Load the registers with type 25
202 * 8 - weak write ordering
203 * 16 - weak read ordering
204 * 1 - write combining
207 int used = centaur_mcr_compute(6, 25);
210 * Mark the registers we are using.
213 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
/* The read-modify-write of `lo` between these two MSR ops falls in
   lines not visible in this chunk. */
216 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
/* Clear the MCRs beyond `used` (loop header not visible here). */
223 wrmsr(MSR_IDT_MCR0+i, 0, 0);
227 * Handle the MCR key on the Winchip 2.
/*
 * winchip2_unprotect_mcr - the WinChip 2 guards its MCRs with a key
 * field in bits 6-8 of MSR_IDT_MCR_CTRL.  Clear that field and write
 * the unlock key into it so the MCRs may be modified.
 * NOTE(review): the computation of `key` is in lines not visible in
 * this chunk.
 */
230 static void __cpuinit winchip2_unprotect_mcr(void)
235 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
236 lo&=~0x1C0; /* blank bits 8-6 */
238 lo |= key<<6; /* replace with unlock key */
239 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
/*
 * winchip2_protect_mcr - relock the WinChip 2 MCRs by clearing the
 * key field (bits 6-8) of MSR_IDT_MCR_CTRL.
 */
242 static void __cpuinit winchip2_protect_mcr(void)
246 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
247 lo&=~0x1C0; /* blank bits 8-6 */
248 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
250 #endif /* CONFIG_X86_OOSTORE */
252 #define ACE_PRESENT (1 << 6)
253 #define ACE_ENABLED (1 << 7)
254 #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
256 #define RNG_PRESENT (1 << 2)
257 #define RNG_ENABLED (1 << 3)
258 #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */
/*
 * init_c3 - VIA C3 specific setup:
 *  - enable the ACE crypto engine and the hardware RNG when the
 *    extended-feature CPUID leaf reports them present but disabled,
 *  - record the Centaur extended feature flags in capability word 5,
 *  - force-enable CX8/PGE on Cyrix III (models 6-9) via MSR_VIA_FCR,
 *  - advertise 3DNow! on pre-Nehemiah parts (models 6-8).
 */
260 static void __cpuinit init_c3(struct cpuinfo_x86 *c)
264 /* Test for Centaur Extended Feature Flags presence */
265 if (cpuid_eax(0xC0000000) >= 0xC0000001) {
266 u32 tmp = cpuid_edx(0xC0000001);
268 /* enable ACE unit, if present and disabled */
269 if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
270 rdmsr (MSR_VIA_FCR, lo, hi);
271 lo |= ACE_FCR; /* enable ACE unit */
272 wrmsr (MSR_VIA_FCR, lo, hi);
273 printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
276 /* enable RNG unit, if present and disabled */
277 if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
278 rdmsr (MSR_VIA_RNG, lo, hi);
279 lo |= RNG_ENABLE; /* enable RNG unit */
280 wrmsr (MSR_VIA_RNG, lo, hi);
281 printk(KERN_INFO "CPU: Enabled h/w RNG\n");
284 /* store Centaur Extended Feature Flags as
285 * word 5 of the CPU capability bit array
287 c->x86_capability[5] = cpuid_edx(0xC0000001);
290 /* Cyrix III family needs CX8 & PGE explicity enabled. */
291 if (c->x86_model >=6 && c->x86_model <= 9) {
292 rdmsr (MSR_VIA_FCR, lo, hi);
/* The FCR bit-set and PGE handling between rdmsr and wrmsr fall in a
   line not visible in this chunk. */
294 wrmsr (MSR_VIA_FCR, lo, hi);
295 set_bit(X86_FEATURE_CX8, c->x86_capability);
298 /* Before Nehemiah, the C3's had 3dNOW! */
299 if (c->x86_model >=6 && c->x86_model <9)
300 set_bit(X86_FEATURE_3DNOW, c->x86_capability);
303 display_cacheinfo(c);
/*
 * init_centaur - vendor init entry point.  Dispatches on the CPU
 * model: WinChip C6 and WinChip 2 variants get their FCR feature
 * bits set, optional out-of-order-store MCR setup under
 * CONFIG_X86_OOSTORE, and the MCR-based MTRR emulation flag; the
 * visible tail also derives L1 cache size from CPUID 0x80000005 and
 * builds the model-id string.  NOTE(review): the family dispatch
 * (family 6 parts call init_c3()) and many case labels/breaks are in
 * lines not visible in this chunk.
 */
306 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
335 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
336 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
337 clear_bit(0*32+31, c->x86_capability);
338 switch(c->x86_model) {
/* WinChip C6: enable its FCR features, and disable the TSC, which is
   noted as bugged on this model. */
345 fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
347 printk(KERN_NOTICE "Disabling bugged TSC.\n");
348 clear_bit(X86_FEATURE_TSC, c->x86_capability);
349 #ifdef CONFIG_X86_OOSTORE
350 centaur_create_optimal_mcr();
352 write combining on non-stack, non-string
353 write combining on string, all types
356 The C6 original lacks weak read order
358 Note 0x120 is write only on Winchip 1 */
360 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
/* WinChip 2: sub-dispatch on stepping. */
364 switch(c->x86_mask) {
375 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
377 #ifdef CONFIG_X86_OOSTORE
378 winchip2_unprotect_mcr();
379 winchip2_create_optimal_mcr();
380 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
382 write combining on non-stack, non-string
383 write combining on string, all types
/* The OR of the enable bits into `lo` falls in lines not visible. */
387 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
388 winchip2_protect_mcr();
/* Next model (WinChip 3 per upstream history — confirm): same FCR
   feature set and the same protected MCR update sequence. */
393 fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
395 #ifdef CONFIG_X86_OOSTORE
396 winchip2_unprotect_mcr();
397 winchip2_create_optimal_mcr();
398 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
400 write combining on non-stack, non-string
401 write combining on string, all types
405 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
406 winchip2_protect_mcr();
/* Apply the accumulated FCR set/clear masks, writing back only when
   something actually changes. */
413 rdmsr(MSR_IDT_FCR1, lo, hi);
414 newlo=(lo|fcr_set) & (~fcr_clr);
417 printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
418 wrmsr(MSR_IDT_FCR1, newlo, hi );
420 printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
422 /* Emulate MTRRs using Centaur's MCR. */
423 set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
425 set_bit(X86_FEATURE_CX8, c->x86_capability);
426 /* Set 3DNow! on Winchip 2 and above. */
427 if (c->x86_model >=8)
428 set_bit(X86_FEATURE_3DNOW, c->x86_capability);
429 /* See if we can find out some more. */
430 if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
432 cpuid(0x80000005,&aa,&bb,&cc,&dd);
433 /* Add L1 data and code cache sizes. */
434 c->x86_cache_size = (cc>>24)+(dd>>24);
436 sprintf( c->x86_model_id, "WinChip %s", name );
/*
 * centaur_size_cache - correct the CPUID-reported cache size for
 * known VIA quirks: C3 models 670-68F need an extra shift, and
 * Nehemiah stepping 1 reports 65KB where 64KB is meant.
 * NOTE(review): the actual adjustment/return statements fall in
 * lines not visible in this chunk.
 */
445 static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
447 /* VIA C3 CPUs (670-68F) need further shifting. */
448 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
451 /* VIA also screwed up Nehemiah stepping 1, and made
452 it return '65KB' instead of '64KB'
453 - Note, it seems this may only be in engineering samples. */
454 if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
/*
 * cpu_dev descriptor for "CentaurHauls" processors, registered with
 * the generic x86 CPU identification code by centaur_init_cpu().
 */
460 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
461 .c_vendor = "Centaur",
462 .c_ident = { "CentaurHauls" },
463 .c_init = init_centaur,
464 .c_size_cache = centaur_size_cache,
467 int __init centaur_init_cpu(void)
469 cpu_devs[X86_VENDOR_CENTAUR] = ¢aur_cpu_dev;