/*
 * This only handles 32-bit MTRR on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>

#include "mtrr.h"
struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}
};
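
/*
 * Illustrative sketch (not part of the original file): how the block layout
 * above maps a physical address below 1MB to an index into
 * mtrr_state.fixed_ranges[]. It mirrors the fixed-range arithmetic used by
 * mtrr_type_lookup() further down; the helper name is hypothetical.
 */
static unsigned int __maybe_unused example_fixed_range_index(unsigned long addr)
{
	if (addr < 0x80000)			 /* 8 x 64KB entries           */
		return addr >> 16;
	if (addr < 0xC0000)			 /* 2 MSRs x 8 16KB entries    */
		return 1 * 8 + ((addr - 0x80000) >> 14);
	return 3 * 8 + ((addr - 0xC0000) >> 12); /* 8 MSRs x 8 4KB entries     */
}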
static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);

static int __initdata mtrr_show;
static int __init mtrr_debug(char *opt)
{
	mtrr_show = 1;
	return 0;
}
early_param("mtrr.show", mtrr_debug);
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	if (mtrr_tom2 && start >= (1ULL<<32) && (end < mtrr_tom2))
		return MTRR_TYPE_WRBACK;

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
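
/*
 * Illustrative sketch (not from the original file): one way a caller might
 * interpret mtrr_type_lookup()'s return codes. The helper name and the
 * conservative fallback to uncachable are hypothetical choices.
 */
static u8 __maybe_unused example_effective_type(u64 start, u64 end)
{
	u8 type = mtrr_type_lookup(start, end);

	if (type == 0xFF)		/* MTRRs disabled			*/
		return MTRR_TYPE_UNCACHABLE;
	if (type == 0xFE)		/* range straddles a var-MTRR boundary	*/
		return MTRR_TYPE_UNCACHABLE;
	return type;			/* otherwise a real MTRR_TYPE_* value	*/
}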
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	/* The 11 fixed-range MSRs are read as 22 32-bit words:
	   p[0..1] for the 64K MSR, p[2..5] for the two 16K MSRs,
	   p[6..21] for the eight 4K MSRs. */
	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}
static void prepare_set(void);
static void post_set(void);
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n",
		       mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000,
					    mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000,
					    mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) -
			      (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO
				       "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i, high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (mtrr_tom2)
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       mtrr_tom2, mtrr_tom2>>20);
	}
	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}
/*
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;
	/* Expand tmp with high bits to all 1s */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
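
/*
 * Worked example (illustrative, not part of the original file) of the
 * mask-to-size arithmetic above, assuming PAGE_SHIFT == 12 and a 36-bit
 * physical address width (size_or_mask == 0xff000000). The helper name
 * and constants are hypothetical.
 */
static unsigned long __maybe_unused example_mask_to_pages(void)
{
	unsigned int mask_hi = 0x0000000f;	/* mask bits 35:32 of a 256MB MTRR */
	unsigned int mask_lo = 0xf0000000;	/* mask bits 31:12 of the same MTRR */
	unsigned int tmp, pages;

	tmp = mask_hi << (32 - 12) | mask_lo >> 12;	/* 0x00ff0000 */
	pages = -(0xff000000 | tmp);			/* 0x00010000 pages */

	return pages;					/* 0x10000 * 4KB = 256MB */
}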
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
static u32 deftype_lo, deftype_hi;
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */
	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
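
/*
 * Minimal sketch (not from the original file) of the calling discipline the
 * comment above prepare_set() requires: interrupts off around the
 * prepare_set()/post_set() pair. generic_set_all() below follows the same
 * pattern; the helper name and the placeholder body are hypothetical.
 */
static void __maybe_unused example_mtrr_update(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();
	/* ... reprogram MTRR MSRs here, e.g. via mtrr_wrmsr() ... */
	post_set();
	local_irq_restore(flags);
}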
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
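
/*
 * Worked example (illustrative, not part of the original file) of the
 * base/mask encoding performed above, for a 256MB write-back region at
 * physical address 4GB, assuming PAGE_SHIFT == 12 and a 36-bit physical
 * address width (size_and_mask == 0x00f00000). The helper is hypothetical.
 */
static void __maybe_unused example_encode_var_range(struct mtrr_var_range *vr)
{
	unsigned long base = 0x100000;	/* 4GB, in pages */
	unsigned long size = 0x10000;	/* 256MB, in pages */

	vr->base_lo = base << PAGE_SHIFT | MTRR_TYPE_WRBACK;	 /* 0x00000006 */
	vr->base_hi = (base & 0x00f00000) >> (32 - PAGE_SHIFT);	 /* 0x00000001 */
	vr->mask_lo = -size << PAGE_SHIFT | 0x800;		 /* 0xf0000800 */
	vr->mask_hi = (-size & 0x00f00000) >> (32 - PAGE_SHIFT); /* 0x0000000f */
}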
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
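
/*
 * Illustrative sketch (not from the original file) of the alignment rule the
 * loop above enforces: base and size are in pages, size must be a power of
 * two, and base must sit on a size boundary. The helper name is hypothetical.
 */
static int __maybe_unused example_alignment_checks(void)
{
	/* 3GB base, 256MB size (pages): 0xC0000 is a multiple of 0x10000 */
	int ok  = generic_validate_add_page(0xC0000, 0x10000, MTRR_TYPE_WRBACK);
	/* 3GB+128MB base, 256MB size: 0xC8000 is not size-aligned -> -EINVAL */
	int bad = generic_validate_add_page(0xC8000, 0x10000, MTRR_TYPE_WRBACK);

	return ok == 0 && bad == -EINVAL;
}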
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}
int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get		   = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set		   = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};