/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <asm/mtrr.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"

u32 num_var_ranges = 0;

unsigned int *usage_table;
static DEFINE_MUTEX(mtrr_mutex);
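
/*
 * Address-width bookkeeping, kept in 4 kB page units: bits set in
 * size_or_mask lie above the CPU's physical address width, so any base
 * or size with one of those bits set is rejected as out of range;
 * size_and_mask is the complementary mask used by the vendor-specific
 * set/get routines.  Both are computed in mtrr_bp_init() below.
 */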
u64 size_or_mask, size_and_mask;

static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

#ifndef CONFIG_X86_64
extern int arr3_protected;
#else
#define arr3_protected 0
#endif

void set_mtrr_ops(struct mtrr_ops * ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets < rev 6 have problems with write-combining.
		   Don't allow it and leave room for other chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/* Intel 450NX errata #23. Non-ascending cacheline evictions to
		   write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}

/*  This function sets num_var_ranges to the number of variable MTRRs  */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;	/* MTRRcap bits 7:0 are VCNT */
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
	    == NULL) {
		printk(KERN_ERR "mtrr: could not allocate\n");
		return;
	}
	for (i = 0; i < max; i++)
		usage_table[i] = 1;
}

struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while(!atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();

	atomic_dec(&data->count);
	while(atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
}
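
/*
 * Overlapping MTRR regions are tolerated when the effective type stays
 * safe: anything overlapping UNCACHABLE is uncached anyway, and a
 * WRTHROUGH/WRBACK overlap resolves to the stricter write-through.
 */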
static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let
 * them take care of it. When they're done, they again decrement data->count
 * and wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	/* make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate,0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while(atomic_read(&data.count))
		cpu_relax();

	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate,1);

	/* do our MTRR business */

	/* HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg,base,size,type);

	/* wait for the others */
	while(atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate,0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * so we're the last ones to touch 'data'
	 */
	while(atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
{
	int i, replace, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base,size,type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		printk(KERN_WARNING "mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	/*  Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				}
				else if (types_compatible(type, ltype))
					continue;
			}
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, mtrr_attrib_to_str(ltype),
			       mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0))
			usage_table[i] = 1;
		else {
			usage_table[i] = usage_table[replace] + !!increment;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				usage_table[replace] = 0;
			}
		}
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}
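
/*
 * Example (illustrative values only): a 4 MB write-combining region at
 * physical address 0xfd000000 is passed to mtrr_add_page() in 4 kB pages:
 *
 *	mtrr_add_page(0xfd000, 0x400, MTRR_TYPE_WRCOMB, 1);
 *
 * i.e. 0x400 pages (4 MB, a power of two) on a 4 MB-aligned base.
 */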

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING
			"mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG
			"mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 char increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
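
/*
 * Typical driver usage (illustrative values only): map a 4 MB framebuffer
 * at 0xfd000000 as write-combining, then release it on teardown, treating
 * the returned register number purely as a cookie:
 *
 *	int mtrr = mtrr_add(0xfd000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
 *	...
 *	if (mtrr >= 0)
 *		mtrr_del(mtrr, 0xfd000000, 0x400000);
 */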

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

/* HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/* The suspend/resume methods are only for CPUs without MTRRs. CPUs using
 * the generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value * mtrr_state;

static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
{
	int i;
	int size = num_var_ranges * sizeof(struct mtrr_value);

	mtrr_state = kzalloc(size,GFP_ATOMIC);
	if (!mtrr_state)
		return -ENOMEM;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device * sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	kfree(mtrr_state);
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	init_ifs();

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;

		/* This is an AMD specific MSR, but we assume (hope?) that
		   Intel will implement it too when they extend the address
		   bus of the Xeon. */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			u32 phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family have Intel style MTRRs, but
			   don't support PAE */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel())
			get_mtrr_state();
	}
}
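
/*
 * Bring a freshly started AP's MTRRs in sync with the boot CPU:
 * mtrr_if->set_all() replays the state that get_mtrr_state() captured
 * in mtrr_bp_init().
 */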
void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called during CPU boot, where
	 * holding the lock would break things. It runs in two cases:
	 * 1. very early during software resume, when there absolutely are
	 * no MTRR entry changes; 2. CPU hot-add. We let mtrr_add/del_page
	 * hold the cpuhotplug lock to prevent MTRR entry changes.
	 */
	local_irq_save(flags);

	mtrr_if->set_all();

	local_irq_restore(flags);
}

/**
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
}

static int __init mtrr_init_finalize(void)
{
	if (!mtrr_if)
		return 0;
	if (use_intel())
		mtrr_state_warn();
	else {
		/* These CPUs lack MTRRs and seem not to support SMP. They
		 * have vendor-specific drivers; we use a tricky method to
		 * support suspend/resume for them.
		 * TBD: is there any system with such a CPU that supports
		 * suspend/resume? If not, we should remove the code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
			&mtrr_sysdev_driver);
	}
	return 0;
}
subsys_initcall(mtrr_init_finalize);