/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
struct fixed_range_block {
int base_msr; /* MSR number of the first MTRR in this block */
int ranges; /* number of MTRRs in this block */
static struct fixed_range_block fixed_range_blocks[] = {
{ MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */
{ MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
{ MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */
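/*
 * Together these cover the whole fixed-range area, the first 1 MiB of
 * physical memory: 0x00000-0x7FFFF in 64K chunks, 0x80000-0xBFFFF in
 * 16K chunks and 0xC0000-0xFFFFF in 4K chunks. Each MSR holds eight
 * one-byte type fields, 88 entries in total.
 */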
static unsigned long smp_changes_mask;
static int mtrr_state_set;
struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."
module_param_named(show, mtrr_show, bool, 0);
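/*
 * With the "mtrr." prefix above this becomes the "mtrr.show" boot/module
 * parameter; mtrr_show is used to gate the verbose dump of the MTRR
 * state that get_mtrr_state() can print below.
 */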
* Returns the effective MTRR type for the region
* - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
* - 0xFF - when MTRR is not enabled
u8 mtrr_type_lookup(u64 start, u64 end)
u8 prev_match, curr_match;
if (!mtrr_state.enabled)
/* Make end inclusive instead of exclusive */
/* Look in fixed ranges. Just return the type as per start */
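/*
 * Index math example: start = 0xC8000 falls in the 4K block, so
 * idx = 3 * 8 + ((0xC8000 - 0xC0000) >> 12) = 32, i.e. the first
 * entry of the second 4K MSR (covering 0xC8000-0xCFFFF).
 */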
if (mtrr_state.have_fixed && (start < 0x100000)) {
if (start < 0x80000) {
return mtrr_state.fixed_ranges[idx];
} else if (start < 0xC0000) {
idx += ((start - 0x80000) >> 14);
return mtrr_state.fixed_ranges[idx];
} else if (start < 0x100000) {
idx += ((start - 0xC0000) >> 12);
return mtrr_state.fixed_ranges[idx];
* Look in variable ranges
* Look for multiple ranges matching this address and pick the type
* as per MTRR precedence
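* (UC always wins; WT wins over WB; any other combination of
* differing types is treated as UC)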
if (!(mtrr_state.enabled & 2)) {
return mtrr_state.def_type;
for (i = 0; i < num_var_ranges; ++i) {
unsigned short start_state, end_state;
if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
(mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
(mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
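/*
 * An address A falls in variable range i when
 * (A & mask) == (base & mask); base and mask are the PhysBase/PhysMask
 * MSR pair with the low 12 (type/valid) bits stripped off.
 */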
start_state = ((start & mask) == (base & mask));
end_state = ((end & mask) == (base & mask));
if (start_state != end_state)
if ((start & mask) != (base & mask)) {
curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
if (prev_match == 0xFF) {
prev_match = curr_match;
if (prev_match == MTRR_TYPE_UNCACHABLE ||
curr_match == MTRR_TYPE_UNCACHABLE) {
return MTRR_TYPE_UNCACHABLE;
if ((prev_match == MTRR_TYPE_WRBACK &&
curr_match == MTRR_TYPE_WRTHROUGH) ||
(prev_match == MTRR_TYPE_WRTHROUGH &&
curr_match == MTRR_TYPE_WRBACK)) {
prev_match = MTRR_TYPE_WRTHROUGH;
curr_match = MTRR_TYPE_WRTHROUGH;
if (prev_match != curr_match) {
return MTRR_TYPE_UNCACHABLE;
if (start >= (1ULL<<32) && (end < mtrr_tom2))
return MTRR_TYPE_WRBACK;
if (prev_match != 0xFF)
return mtrr_state.def_type;
/* Get the MSR pair relating to a var range */
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
/* fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
struct mtrr_var_range *vr;
vr = mtrr_state.var_ranges;
vr[index].base_lo = base_lo;
vr[index].base_hi = base_hi;
vr[index].mask_lo = mask_lo;
vr[index].mask_hi = mask_hi;
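/*
 * The 88 fixed-range type bytes are read as 22 32-bit words:
 * p[0..1] from the 64K MSR, p[2..5] from the two 16K MSRs and
 * p[6..21] from the eight 4K MSRs.
 */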
get_fixed_ranges(mtrr_type *frs)
unsigned int *p = (unsigned int *) frs;
rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
void mtrr_save_fixed_ranges(void *info)
get_fixed_ranges(mtrr_state.fixed_ranges);
static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
for (i = 0; i < 8; ++i, ++types, base += step)
printk(KERN_INFO "MTRR %05X-%05X %s\n",
base, base + step - 1, mtrr_attrib_to_str(*types));
static void prepare_set(void);
static void post_set(void);
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
struct mtrr_var_range *vrs;
vrs = mtrr_state.var_ranges;
rdmsr(MTRRcap_MSR, lo, dummy);
mtrr_state.have_fixed = (lo >> 8) & 1;
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
if (mtrr_state.have_fixed)
get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
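/*
 * In MTRRdefType, bit 10 is FE (fixed-range enable) and bit 11 is E
 * (MTRR enable), so after the shift mtrr_state.enabled carries FE in
 * bit 0 and E in bit 1.
 */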
if (amd_special_default_mtrr()) {
rdmsr(MSR_K8_TOP_MEM2, low, high);
mtrr_tom2 &= 0xffffff800000ULL;
printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
mtrr_state.enabled & 1 ? "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
for (i = 0; i < 8; ++i)
print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
printk(KERN_INFO "MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
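/*
 * high_width is the number of hex digits needed for the physical
 * address bits above bit 31: assuming size_or_mask clears exactly its
 * low (physical address width - PAGE_SHIFT) bits, ffs(size_or_mask) - 1
 * gives (physical address width - PAGE_SHIFT) and the expression
 * reduces to (physical address width - 32 + 3) / 4; e.g. 36-bit
 * addressing needs one extra digit for base_hi/mask_hi.
 */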
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
mtrr_state.var_ranges[i].base_hi,
mtrr_state.var_ranges[i].base_lo >> 12,
mtrr_state.var_ranges[i].mask_hi,
mtrr_state.var_ranges[i].mask_lo >> 12,
mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
printk(KERN_INFO "MTRR %u disabled\n", i);
printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
mtrr_tom2, mtrr_tom2>>20);
/* PAT setup for the boot processor (BP). We need to go through the sync steps here */
local_irq_save(flags);
local_irq_restore(flags);
/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
unsigned long mask = smp_changes_mask;
if (mask & MTRR_CHANGE_MASK_FIXED)
printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
printk(KERN_INFO "mtrr: corrected configuration.\n");
/* Doesn't attempt to pass an error out to MTRR users, because it's
quite complicated in some cases and probably not worth it: the best
error handling is simply to ignore the failure. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
if (wrmsr_safe(msr, a, b) < 0)
"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
smp_processor_id(), msr, a, b);
* Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs;
* see AMD publication no. 24593, chapter 3.2.1 for more information
static inline void k8_enable_fixed_iorrs(void)
rdmsr(MSR_K8_SYSCFG, lo, hi);
mtrr_wrmsr(MSR_K8_SYSCFG, lo
| K8_MTRRFIXRANGE_DRAM_ENABLE
| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
* set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
* @msr: MSR address of the MTRR which should be checked and updated
* @changed: pointer which indicates whether the MTRR needed to be changed
* @msrwords: pointer to the MSR values which the MSR should have
* If K8 extensions are wanted, update the K8 SYSCFG MSR also.
* See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
if (lo != msrwords[0] || hi != msrwords[1]) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
(boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
k8_enable_fixed_iorrs();
mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
* generic_get_free_region - Get a free MTRR.
* @base: The starting (base) address of the region.
* @size: The size (in bytes) of the region.
* @replace_reg: mtrr index to be replaced; set to invalid value if none.
* Returns: The index of the region on success, else negative on error.
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
unsigned long lbase, lsize;
max = num_var_ranges;
if (replace_reg >= 0 && replace_reg < max)
for (i = 0; i < max; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type *type)
unsigned int mask_lo, mask_hi, base_lo, base_hi;
unsigned int tmp, hi;
rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
/* Invalid (i.e. free) range */
rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
/* Work out the shifted address mask. */
tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
mask_lo = size_or_mask | tmp;
/* Expand tmp with high bits to all 1s */
tmp |= ~((1<<(hi - 1)) - 1);
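/*
 * A valid PhysMask must be a contiguous run of 1s reaching up to the
 * processor's physical address width; the expansion above fills in all
 * bits from the highest set bit upward, so a truncated BIOS-programmed
 * mask shows up as a mismatch and is repaired below.
 */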
if (tmp != mask_lo) {
WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
/* This works correctly if size is a power of two, i.e. a contiguous range. */
*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
*type = base_lo & 0xff;
* set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
* @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
static int set_fixed_ranges(mtrr_type *frs)
unsigned long long *saved = (unsigned long long *) frs;
bool changed = false;
while (fixed_range_blocks[++block].ranges)
for (range = 0; range < fixed_range_blocks[block].ranges; range++)
set_fixed_range(fixed_range_blocks[block].base_msr + range,
&changed, (unsigned int *) saved++);
/* Set the MSR pair relating to a var range. Returns true if anything
was changed. */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
bool changed = false;
rdmsr(MTRRphysBase_MSR(index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
rdmsr(MTRRphysMask_MSR(index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
static u32 deftype_lo, deftype_hi;
* set_mtrr_state - Set the MTRR state for this CPU.
* NOTE: The CPU must already be in a safe state for MTRR changes.
* RETURNS: 0 if no changes made, else a mask indicating what was changed.
static unsigned long set_mtrr_state(void)
unsigned long change_mask = 0;
for (i = 0; i < num_var_ranges; i++)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
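/* 0xcff covers the default type (bits 0-7) and the FE/E enable bits (10-11). */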
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);
* Since we are disabling the cache, don't allow any interrupts; they
* would run extremely slowly and would only increase the pain. The caller must
* ensure that local interrupts are disabled and are re-enabled after post_set()
static void prepare_set(void) __acquires(set_atomicity_lock)
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
spin_lock(&set_atomicity_lock);
/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | X86_CR0_CD;
/* Save value of CR4 and clear Page Global Enable (bit 7) */
write_cr4(cr4 & ~X86_CR4_PGE);
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
/* Save MTRR state */
rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
static void post_set(void) __releases(set_atomicity_lock)
/* Flush TLBs (no need to flush caches - they are disabled) */
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
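/* Enable caches again: 0xbfffffff clears CR0.CD (bit 30). */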
write_cr0(read_cr0() & 0xbfffffff);
/* Restore value of CR4 */
spin_unlock(&set_atomicity_lock);
static void generic_set_all(void)
unsigned long mask, count;
local_irq_save(flags);
/* Actually set the state */
mask = set_mtrr_state();
local_irq_restore(flags);
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
set_bit(count, &smp_changes_mask);
/**
 * generic_set_mtrr - set a variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
struct mtrr_var_range *vr;
vr = &mtrr_state.var_ranges[reg];
local_irq_save(flags);
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
memset(vr, 0, sizeof(struct mtrr_var_range));
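/*
 * base and size are in units of 4K pages. For a power-of-two size,
 * -size has 1s in every bit from log2(size) upward, so shifting it
 * left by PAGE_SHIFT yields the PhysMask low word; e.g. size = 0x100
 * pages (1 MiB) gives mask_lo = 0xfff00000 | 0x800 (the valid bit).
 */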
vr->base_lo = base << PAGE_SHIFT | type;
vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
vr->mask_lo = -size << PAGE_SHIFT | 0x800;
vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
local_irq_restore(flags);
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
unsigned long lbase, last;
/* For Intel PPro stepping <= 7, must be 4 MiB aligned
and not touch 0x70000000->0x7003FFFF */
if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_mask <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
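/*
 * base and size are given in 4K pages, so 0x70000-0x7003F below
 * corresponds to the physical range 0x70000000-0x7003FFFF.
 */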
if (!(base + size < 0x70000 || base > 0x7003F) &&
(type == MTRR_TYPE_WRCOMB
|| type == MTRR_TYPE_WRBACK)) {
printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
/* Check upper bits of base and last are equal and lower bits are 0
for base and 1 for last */
last = base + size - 1;
for (lbase = base; !(lbase & 1) && (last & 1);
lbase = lbase >> 1, last = last >> 1) ;
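/*
 * For a region whose size is a power of two and whose base is aligned
 * to that size, base ends in k zero bits and last ends in k one bits
 * (size = 2^k pages); stripping those bits must leave lbase == last,
 * otherwise the warning below is printed.
 */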
printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
static int generic_have_wrcomb(void)
unsigned long config, dummy;
rdmsr(MTRRcap_MSR, config, dummy);
return (config & (1 << 10));
int positive_have_wrcomb(void)
/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
.set_all = generic_set_all,
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
.set = generic_set_mtrr,
.validate_add_page = generic_validate_add_page,
.have_wrcomb = generic_have_wrcomb,