/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include "mtrr.h"
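
/*
 * Cached copy of this machine's MTRR register state: filled from the MSRs
 * by get_mtrr_state() and pushed back to the hardware via set_mtrr_state()
 * when the MTRRs are reprogrammed.
 */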
struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static __initdata int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
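
/*
 * The eleven fixed-range MSRs read above each hold eight one-byte type
 * fields: the 64K MSR covers 0x00000-0x7ffff, the two 16K MSRs cover
 * 0x80000-0xbffff and the eight 4K MSRs cover 0xc0000-0xfffff, which is
 * the layout print_fixed() walks when dumping the fixed ranges.
 */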
static void __init print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;
	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n", base, base + step - 1, mtrr_attrib_to_str(*types));
}

/*  Grab all of the MTRR state for this CPU into mtrr_state  */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
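		/*
		 * high_width is the number of hex digits needed for the part
		 * of a physical address above bit 31: size_or_mask (set up by
		 * the MTRR init code) presumably marks, in page-frame units,
		 * the address bits beyond the CPU's physical address width,
		 * so ffs() recovers that width and the arithmetic below
		 * rounds the bits above 32 up to whole hex digits for the
		 * %0*X formats in the printk.
		 */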
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/*  Some BIOSes are buggy and don't set all MTRRs the same!  */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* No attempt is made to pass an error back to MTRR users, because error
   handling is complicated in some cases and rarely worth it: the best
   handling here is simply to ignore the failure. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of a free register on success, else a negative error code.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;
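
	/*
	 * mask_lo now holds the PhysMask in page-frame units, with
	 * size_or_mask filling in the bits above the supported physical
	 * address width.  A hypothetical example with 4K pages: a 256 MiB
	 * region yields a page mask of 0xffff0000, and -0xffff0000 ==
	 * 0x10000 pages == 256 MiB, which is exactly what the negation
	 * below computes.
	 */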
	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i, changed = 0;
	unsigned int lo, hi;

	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = 1;
	}
	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
			changed = 1;
		}
	}
	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
			changed = 1;
		}
	}
	return changed;
}

/*  Set the MSR pair relating to a var range.  Returns true (nonzero) if
    changes are made.  */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = 0;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = 1;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = 1;
	}
	return changed;
}
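
/*
 * Saved copy of the MTRRdefType MSR: prepare_set() fills it in, while
 * set_mtrr_state() and post_set() operate on the saved value.
 */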
static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes were made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*  prepare_set() saved MTRRdefType in deftype_lo/deftype_hi and
	    post_set() writes that saved value back, so to change MTRRdefType
	    we fiddle with the saved copy.  */
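	/*
	 * MTRRdefType layout (low word): bits 7:0 hold the default memory
	 * type, bit 10 is the fixed-range enable and bit 11 the global MTRR
	 * enable, hence the 0xff, 0xc00 and 0xcff masks used here and in
	 * prepare_set().
	 */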
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain.  The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	    for this CPU while the MTRRs are changed, but changing this requires
	    more invasive changes to the way the kernel boots  */
	spin_lock(&set_atomicity_lock);

	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/*  Flush TLBs (no need to flush caches - they are disabled)  */
	__flush_tlb();

	/* Intel (P6) standard MTRRs: restore the saved MTRRdefType */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Enable caches again by clearing the CD flag  */
	write_cr0(read_cr0() & 0xbfffffff);

	/*  Restore value of CR4  */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);
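
	/*
	 * Anything that changed on this CPU is ORed into smp_changes_mask so
	 * that mtrr_state_warn() can later report a BIOS that programmed the
	 * CPUs inconsistently.
	 */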
	/*  Use the atomic bitops to update the global mask  */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
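
		/*
		 * base and size arrive here in page units (hence the
		 * PAGE_SHIFT shifts above).  A hypothetical example: a
		 * 256 MiB write-combining region at 3 GiB has base = 0xc0000
		 * and size = 0x10000 pages, giving base_lo = 0xc0000001
		 * (address | type WC) and mask_lo = 0xf0000800 (low half of
		 * the mask | valid bit 11).
		 */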
		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}
	if (base + size < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}

	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
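	/*
	 * Example (page units): base = 0x100, size = 0x100 gives last = 0x1ff;
	 * the loop below shifts both until lbase == last == 1, so the region
	 * is accepted.  With size = 0x180 the loop stops at lbase = 0x2,
	 * last = 0x4 and the request is rejected: the size must be a power of
	 * two and the base must be aligned to it.
	 */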
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1) ;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
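
/*  Bit 10 of the MTRRcap MSR advertises write-combining support.  */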
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*  Generic (Intel-style) MTRR operations  */
struct mtrr_ops generic_mtrr_ops = {
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};