1 /* Generic MTRR (Memory Type Range Register) driver.
3 Copyright (C) 1997-2000 Richard Gooch
4 Copyright (c) 2002 Patrick Mochel
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Library General Public
8 License as published by the Free Software Foundation; either
9 version 2 of the License, or (at your option) any later version.
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Library General Public License for more details.
16 You should have received a copy of the GNU Library General Public
17 License along with this library; if not, write to the Free
18 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 Richard Gooch may be reached by email at rgooch@atnf.csiro.au
21 The postal address is:
22 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
24 Source: "Pentium Pro Family Developer's Manual, Volume 3:
25 Operating System Writer's Guide" (Intel document number 242692),
28 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
30 Source: Intel Architecture Software Developers Manual, Volume 3:
31 System Programming Guide; Section 9.11. (1997 edition - PPro).
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/pci.h>
37 #include <linux/smp.h>
38 #include <linux/cpu.h>
39 #include <linux/mutex.h>
40 #include <linux/sort.h>
44 #include <asm/uaccess.h>
45 #include <asm/processor.h>
47 #include <asm/kvm_para.h>
50 u32 num_var_ranges = 0;
52 unsigned int mtrr_usage_table[MAX_VAR_RANGES];
53 static DEFINE_MUTEX(mtrr_mutex);
55 u64 size_or_mask, size_and_mask;
57 static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
59 struct mtrr_ops * mtrr_if = NULL;
61 static void set_mtrr(unsigned int reg, unsigned long base,
62 unsigned long size, mtrr_type type);
64 void set_mtrr_ops(struct mtrr_ops * ops)
66 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
67 mtrr_ops[ops->vendor] = ops;
70 /* Returns non-zero if we have the write-combining memory type */
71 static int have_wrcomb(void)
76 if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
77 /* ServerWorks LE chipsets < rev 6 have problems with write-combining
78 Don't allow it and leave room for other chipsets to be tagged */
79 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
80 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
81 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
83 printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
88 /* Intel 450NX errata # 23. Non-ascending cacheline evictions to
89 write combining memory may result in data corruption */
90 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
91 dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
92 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
98 return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
101 /* This function sets num_var_ranges to the number of variable MTRRs */
102 static void __init set_num_var_ranges(void)
104 unsigned long config = 0, dummy;
107 rdmsr(MTRRcap_MSR, config, dummy);
108 } else if (is_cpu(AMD))
110 else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
112 num_var_ranges = config & 0xff;
115 static void __init init_table(void)
119 max = num_var_ranges;
120 for (i = 0; i < max; i++)
121 mtrr_usage_table[i] = 1;
124 struct set_mtrr_data {
127 unsigned long smp_base;
128 unsigned long smp_size;
129 unsigned int smp_reg;
133 static void ipi_handler(void *info)
134 /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
139 struct set_mtrr_data *data = info;
142 local_irq_save(flags);
144 atomic_dec(&data->count);
145 while(!atomic_read(&data->gate))
148 /* The master has cleared me to execute */
149 if (data->smp_reg != ~0U)
150 mtrr_if->set(data->smp_reg, data->smp_base,
151 data->smp_size, data->smp_type);
155 atomic_dec(&data->count);
156 while(atomic_read(&data->gate))
159 atomic_dec(&data->count);
160 local_irq_restore(flags);
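/*
 * Overlap compatibility check used by mtrr_add_page(): an overlap between two
 * variable ranges is tolerated when either side is UNCACHABLE (UC wins over
 * any other type) or when the pair is WRTHROUGH/WRBACK (the stricter
 * write-through policy applies); any other combination is a real conflict.
 */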
164 static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
165 return type1 == MTRR_TYPE_UNCACHABLE ||
166 type2 == MTRR_TYPE_UNCACHABLE ||
167 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
168 (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
172 * set_mtrr - update mtrrs on all processors
173 * @reg: mtrr in question
178 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
180 * 1. Send IPI to do the following:
181 * 2. Disable Interrupts
182 * 3. Wait for all procs to do so
183 * 4. Enter no-fill cache mode
187 * 8. Disable all range registers
188 * 9. Update the MTRRs
189 * 10. Enable all range registers
190 * 11. Flush all TLBs and caches again
191 * 12. Enter normal cache mode and reenable caching
193 * 14. Wait for buddies to catch up
194 * 15. Enable interrupts.
196 * What does that mean for us? Well, first we set data.count to the number
197 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
198 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
199 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
200 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
201 * differently, so we call mtrr_if->set() callback and let them take care of it.
202 * When they're done, they again decrement data->count and wait for data.gate to be cleared.
204 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
205 * Everyone then enables interrupts and we all continue on.
207 * Note that the mechanism is the same for UP systems, too; all the SMP stuff becomes a no-op.
210 static void set_mtrr(unsigned int reg, unsigned long base,
211 unsigned long size, mtrr_type type)
213 struct set_mtrr_data data;
217 data.smp_base = base;
218 data.smp_size = size;
219 data.smp_type = type;
220 atomic_set(&data.count, num_booting_cpus() - 1);
221 /* make sure data.count is visible before unleashing other CPUs */
223 atomic_set(&data.gate,0);
225 /* Start the ball rolling on other CPUs */
226 if (smp_call_function(ipi_handler, &data, 0) != 0)
227 panic("mtrr: timed out waiting for other CPUs\n");
229 local_irq_save(flags);
231 while(atomic_read(&data.count))
234 /* ok, reset count and toggle gate */
235 atomic_set(&data.count, num_booting_cpus() - 1);
237 atomic_set(&data.gate,1);
239 /* do our MTRR business */
242 * We use this same function to initialize the mtrrs on boot.
243 * The state of the boot cpu's mtrrs has been saved, and we want
244 * to replicate that across all the APs.
245 * If we're doing that @reg is set to something special...
248 mtrr_if->set(reg,base,size,type);
250 /* wait for the others */
251 while(atomic_read(&data.count))
254 atomic_set(&data.count, num_booting_cpus() - 1);
256 atomic_set(&data.gate,0);
259 * Wait here for everyone to have seen the gate change
260 * So we're the last ones to touch 'data'
262 while(atomic_read(&data.count))
265 local_irq_restore(flags);
269 * mtrr_add_page - Add a memory type region
270 * @base: Physical base address of region in pages (in units of 4 kB!)
271 * @size: Physical size of region in pages (4 kB)
272 * @type: Type of MTRR desired
273 * @increment: If this is true do usage counting on the region
275 * Memory type region registers control the caching on newer Intel and
276 * non-Intel processors. This function allows drivers to request that an
277 * MTRR be added. The details and hardware specifics of each processor's
278 * implementation are hidden from the caller, but nevertheless the
279 * caller should expect to need to provide a power of two size on an
280 * equivalent power of two boundary.
282 * If the region cannot be added either because all regions are in use
283 * or the CPU cannot support it, a negative value is returned. On success
284 * the register number for this entry is returned, but should be treated as opaque.
287 * On a multiprocessor machine the changes are made to all processors.
288 * This is required on x86 by the Intel processors.
290 * The available types are
292 * %MTRR_TYPE_UNCACHABLE - No caching
294 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
296 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
298 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
300 * BUGS: Needs a quiet flag for the cases where drivers do not mind
301 * failures and do not wish system log messages to be sent.
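/*
 * Illustrative sketch (not part of the original source): a driver asking for
 * write-combining over a page-aligned aperture.  The names "aperture_start"
 * and "aperture_size" are hypothetical byte values; failure is normally
 * non-fatal and the device simply runs uncached.
 *
 *	int reg = mtrr_add_page(aperture_start >> PAGE_SHIFT,
 *				aperture_size >> PAGE_SHIFT,
 *				MTRR_TYPE_WRCOMB, true);
 *	if (reg < 0)
 *		printk(KERN_INFO "no write-combining, running uncached\n");
 */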
304 int mtrr_add_page(unsigned long base, unsigned long size,
305 unsigned int type, bool increment)
307 int i, replace, error;
309 unsigned long lbase, lsize;
314 if ((error = mtrr_if->validate_add_page(base,size,type)))
317 if (type >= MTRR_NUM_TYPES) {
318 printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
322 /* If the type is WC, check that this processor supports it */
323 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
325 "mtrr: your processor doesn't support write-combining\n");
330 printk(KERN_WARNING "mtrr: zero sized request\n");
334 if (base & size_or_mask || size & size_or_mask) {
335 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
342 /* No CPU hotplug when we change MTRR entries */
344 /* Search for existing MTRR */
345 mutex_lock(&mtrr_mutex);
346 for (i = 0; i < num_var_ranges; ++i) {
347 mtrr_if->get(i, &lbase, &lsize, &ltype);
348 if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
350 /* At this point we know there is some kind of overlap/enclosure */
351 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
352 if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
353 /* New region encloses an existing region */
355 replace = replace == -1 ? i : -2;
358 else if (types_compatible(type, ltype))
362 "mtrr: 0x%lx000,0x%lx000 overlaps existing"
363 " 0x%lx000,0x%lx000\n", base, size, lbase,
367 /* New region is enclosed by an existing region */
369 if (types_compatible(type, ltype))
371 printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
372 base, size, mtrr_attrib_to_str(ltype),
373 mtrr_attrib_to_str(type));
377 ++mtrr_usage_table[i];
381 /* Search for an empty MTRR */
382 i = mtrr_if->get_free_region(base, size, replace);
384 set_mtrr(i, base, size, type);
385 if (likely(replace < 0)) {
386 mtrr_usage_table[i] = 1;
388 mtrr_usage_table[i] = mtrr_usage_table[replace];
390 mtrr_usage_table[i]++;
391 if (unlikely(replace != i)) {
392 set_mtrr(replace, 0, 0, 0);
393 mtrr_usage_table[replace] = 0;
397 printk(KERN_INFO "mtrr: no more MTRRs available\n");
400 mutex_unlock(&mtrr_mutex);
405 static int mtrr_check(unsigned long base, unsigned long size)
407 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
409 "mtrr: size and base must be multiples of 4 kiB\n");
411 "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
419 * mtrr_add - Add a memory type region
420 * @base: Physical base address of region
421 * @size: Physical size of region
422 * @type: Type of MTRR desired
423 * @increment: If this is true do usage counting on the region
425 * Memory type region registers control the caching on newer Intel and
426 * non-Intel processors. This function allows drivers to request that an
427 * MTRR be added. The details and hardware specifics of each processor's
428 * implementation are hidden from the caller, but nevertheless the
429 * caller should expect to need to provide a power of two size on an
430 * equivalent power of two boundary.
432 * If the region cannot be added either because all regions are in use
433 * or the CPU cannot support it, a negative value is returned. On success
434 * the register number for this entry is returned, but should be treated as opaque.
437 * On a multiprocessor machine the changes are made to all processors.
438 * This is required on x86 by the Intel processors.
440 * The available types are
442 * %MTRR_TYPE_UNCACHABLE - No caching
444 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
446 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
448 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
450 * BUGS: Needs a quiet flag for the cases where drivers do not mind
451 * failures and do not wish system log messages to be sent.
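/*
 * Illustrative sketch (not part of the original source): the byte-granular
 * wrapper as a framebuffer driver might use it.  "fb_phys" and "fb_size" are
 * hypothetical; per the constraints above they must be page aligned and the
 * size a suitably aligned power of two.
 *
 *	int wc_cookie = mtrr_add(fb_phys, fb_size, MTRR_TYPE_WRCOMB, true);
 *
 * The return value is kept only so it can later be passed to mtrr_del(); a
 * negative value just means the mapping stays uncached.
 */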
455 mtrr_add(unsigned long base, unsigned long size, unsigned int type,
458 if (mtrr_check(base, size))
460 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
465 * mtrr_del_page - delete a memory type region
466 * @reg: Register returned by mtrr_add
467 * @base: Physical base address
468 * @size: Size of region
470 * If register is supplied then base and size are ignored. This is
471 * how drivers should call it.
473 * Releases an MTRR region. If the usage count drops to zero the
474 * register is freed and the region returns to default state.
475 * On success the register is returned, on failure a negative error code is returned.
479 int mtrr_del_page(int reg, unsigned long base, unsigned long size)
483 unsigned long lbase, lsize;
489 max = num_var_ranges;
490 /* No CPU hotplug when we change MTRR entries */
492 mutex_lock(&mtrr_mutex);
494 /* Search for existing MTRR */
495 for (i = 0; i < max; ++i) {
496 mtrr_if->get(i, &lbase, &lsize, &ltype);
497 if (lbase == base && lsize == size) {
503 printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
509 printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
512 mtrr_if->get(reg, &lbase, &lsize, &ltype);
514 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
517 if (mtrr_usage_table[reg] < 1) {
518 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
521 if (--mtrr_usage_table[reg] < 1)
522 set_mtrr(reg, 0, 0, 0);
525 mutex_unlock(&mtrr_mutex);
530 * mtrr_del - delete a memory type region
531 * @reg: Register returned by mtrr_add
532 * @base: Physical base address
533 * @size: Size of region
535 * If register is supplied then base and size are ignored. This is
536 * how drivers should call it.
538 * Releases an MTRR region. If the usage count drops to zero the
539 * register is freed and the region returns to default state.
540 * On success the register is returned, on failure a negative error code is returned.
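/*
 * Illustrative sketch (not part of the original source): releasing the region
 * at driver teardown, reusing the hypothetical "wc_cookie", "fb_phys" and
 * "fb_size" from the mtrr_add() sketch above.  Base and size are ignored when
 * a valid register number is supplied.
 *
 *	if (wc_cookie >= 0)
 *		mtrr_del(wc_cookie, fb_phys, fb_size);
 */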
545 mtrr_del(int reg, unsigned long base, unsigned long size)
547 if (mtrr_check(base, size))
549 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
552 EXPORT_SYMBOL(mtrr_add);
553 EXPORT_SYMBOL(mtrr_del);
556 * These should be called implicitly, but we can't yet until the initcall sequence is sorted out.
559 static void __init init_ifs(void)
561 #ifndef CONFIG_X86_64
568 /* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
569 * generic MTRR driver don't require this.
577 static struct mtrr_value mtrr_state[MAX_VAR_RANGES];
579 static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
583 for (i = 0; i < num_var_ranges; i++) {
585 &mtrr_state[i].lbase,
586 &mtrr_state[i].lsize,
587 &mtrr_state[i].ltype);
592 static int mtrr_restore(struct sys_device * sysdev)
596 for (i = 0; i < num_var_ranges; i++) {
597 if (mtrr_state[i].lsize)
601 mtrr_state[i].ltype);
608 static struct sysdev_driver mtrr_sysdev_driver = {
609 .suspend = mtrr_save,
610 .resume = mtrr_restore,
613 /* should be related to the number of variable MTRR ranges */
614 #define RANGE_NUM 256
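/*
 * The range helpers below operate on struct res_range entries (definition not
 * shown here): each holds an inclusive [start, end] page-frame range, and an
 * entry with end == 0 is treated as unused.
 */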
622 add_range(struct res_range *range, int nr_range, unsigned long start,
626 if (nr_range >= RANGE_NUM)
629 range[nr_range].start = start;
630 range[nr_range].end = end;
638 add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
643 /* try to merge it with an existing range */
644 for (i = 0; i < nr_range; i++) {
645 unsigned long final_start, final_end;
646 unsigned long common_start, common_end;
651 common_start = max(range[i].start, start);
652 common_end = min(range[i].end, end);
653 if (common_start > common_end + 1)
656 final_start = min(range[i].start, start);
657 final_end = max(range[i].end, end);
659 range[i].start = final_start;
660 range[i].end = final_end;
664 /* could not merge, need to add a new range */
665 return add_range(range, nr_range, start, end);
669 subtract_range(struct res_range *range, unsigned long start, unsigned long end)
673 for (j = 0; j < RANGE_NUM; j++) {
677 if (start <= range[j].start && end >= range[j].end) {
683 if (start <= range[j].start && end < range[j].end &&
684 range[j].start < end + 1) {
685 range[j].start = end + 1;
690 if (start > range[j].start && end >= range[j].end &&
691 range[j].end > start - 1) {
692 range[j].end = start - 1;
696 if (start > range[j].start && end < range[j].end) {
697 /* the range is split in two: find a spare slot for the upper part */
698 for (i = 0; i < RANGE_NUM; i++) {
699 if (range[i].end == 0)
703 range[i].end = range[j].end;
704 range[i].start = end + 1;
706 printk(KERN_ERR "run out of slots in ranges\n");
708 range[j].end = start - 1;
714 static int __init cmp_range(const void *x1, const void *x2)
716 const struct res_range *r1 = x1;
717 const struct res_range *r2 = x2;
723 return start1 - start2;
726 struct var_mtrr_range_state {
727 unsigned long base_pfn;
728 unsigned long size_pfn;
732 static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
733 static int __initdata debug_print;
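/*
 * Build, from range_state[], the list of page-frame ranges the variable MTRRs
 * map as write-back: add the WB entries (merging where possible), subtract
 * the UC/WP entries and the optional extra region, then sort the result and
 * clear the unused slots.  Returns the number of ranges produced.
 */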
736 x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
737 unsigned long extra_remove_base,
738 unsigned long extra_remove_size)
740 unsigned long i, base, size;
743 for (i = 0; i < num_var_ranges; i++) {
744 type = range_state[i].type;
745 if (type != MTRR_TYPE_WRBACK)
747 base = range_state[i].base_pfn;
748 size = range_state[i].size_pfn;
749 nr_range = add_range_with_merge(range, nr_range, base,
753 printk(KERN_DEBUG "After WB checking\n");
754 for (i = 0; i < nr_range; i++)
755 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
756 range[i].start, range[i].end + 1);
759 /* take out UC ranges */
760 for (i = 0; i < num_var_ranges; i++) {
761 type = range_state[i].type;
762 if (type != MTRR_TYPE_UNCACHABLE &&
763 type != MTRR_TYPE_WRPROT)
765 size = range_state[i].size_pfn;
768 base = range_state[i].base_pfn;
769 subtract_range(range, base, base + size - 1);
771 if (extra_remove_size)
772 subtract_range(range, extra_remove_base,
773 extra_remove_base + extra_remove_size - 1);
775 /* get new range num */
777 for (i = 0; i < RANGE_NUM; i++) {
783 printk(KERN_DEBUG "After UC checking\n");
784 for (i = 0; i < nr_range; i++)
785 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
786 range[i].start, range[i].end + 1);
789 /* sort the ranges */
790 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
792 printk(KERN_DEBUG "After sorting\n");
793 for (i = 0; i < nr_range; i++)
794 printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
795 range[i].start, range[i].end + 1);
798 /* clear those that are not used */
799 for (i = nr_range; i < RANGE_NUM; i++)
800 memset(&range[i], 0, sizeof(range[i]));
805 static struct res_range __initdata range[RANGE_NUM];
807 #ifdef CONFIG_MTRR_SANITIZER
809 static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
815 for (i = 0; i < nr_range; i++)
816 sum += range[i].end + 1 - range[i].start;
821 static int enable_mtrr_cleanup __initdata =
822 CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;
824 static int __init disable_mtrr_cleanup_setup(char *str)
826 if (enable_mtrr_cleanup != -1)
827 enable_mtrr_cleanup = 0;
830 early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);
832 static int __init enable_mtrr_cleanup_setup(char *str)
834 if (enable_mtrr_cleanup != -1)
835 enable_mtrr_cleanup = 1;
838 early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
840 static int __init mtrr_cleanup_debug_setup(char *str)
845 early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
847 struct var_mtrr_state {
848 unsigned long range_startk;
849 unsigned long range_sizek;
850 unsigned long chunk_sizek;
851 unsigned long gran_sizek;
856 set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
857 unsigned char type, unsigned int address_bits)
859 u32 base_lo, base_hi, mask_lo, mask_hi;
863 fill_mtrr_var_range(reg, 0, 0, 0, 0);
867 mask = (1ULL << address_bits) - 1;
868 mask &= ~((((u64)sizek) << 10) - 1);
870 base = ((u64)basek) << 10;
875 base_lo = base & ((1ULL<<32) - 1);
876 base_hi = base >> 32;
878 mask_lo = mask & ((1ULL<<32) - 1);
879 mask_hi = mask >> 32;
881 fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
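/*
 * Worked example with hypothetical values: for address_bits == 36,
 * basek == 0x100000 (1 GB) and sizek == 0x40000 (256 MB), the code above
 * computes mask = 0xfffffffff & ~(0x10000000 - 1) = 0xff0000000 and
 * base = 0x40000000 before splitting them into the lo/hi register halves.
 */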
885 save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
888 range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
889 range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
890 range_state[reg].type = type;
894 set_var_mtrr_all(unsigned int address_bits)
896 unsigned long basek, sizek;
900 for (reg = 0; reg < num_var_ranges; reg++) {
901 basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
902 sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
903 type = range_state[reg].type;
905 set_var_mtrr(reg, basek, sizek, type, address_bits);
909 static unsigned long to_size_factor(unsigned long sizek, char *factorp)
912 unsigned long base = sizek;
914 if (base & ((1<<10) - 1)) {
915 /* not MB aligned */
917 } else if (base & ((1<<20) - 1)) {
930 static unsigned int __init
931 range_to_mtrr(unsigned int reg, unsigned long range_startk,
932 unsigned long range_sizek, unsigned char type)
934 if (!range_sizek || (reg >= num_var_ranges))
937 while (range_sizek) {
938 unsigned long max_align, align;
941 /* Compute the maximum size I can make a range */
943 max_align = ffs(range_startk) - 1;
946 align = fls(range_sizek) - 1;
947 if (align > max_align)
952 char start_factor = 'K', size_factor = 'K';
953 unsigned long start_base, size_base;
955 start_base = to_size_factor(range_startk, &start_factor),
956 size_base = to_size_factor(sizek, &size_factor),
958 printk(KERN_DEBUG "Setting variable MTRR %d, "
959 "base: %ld%cB, range: %ld%cB, type %s\n",
960 reg, start_base, start_factor,
961 size_base, size_factor,
962 (type == MTRR_TYPE_UNCACHABLE)?"UC":
963 ((type == MTRR_TYPE_WRBACK)?"WB":"Other")
966 save_var_mtrr(reg++, range_startk, sizek, type);
967 range_startk += sizek;
968 range_sizek -= sizek;
969 if (reg >= num_var_ranges)
975 static unsigned __init
976 range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
979 unsigned long hole_basek, hole_sizek;
980 unsigned long second_basek, second_sizek;
981 unsigned long range0_basek, range0_sizek;
982 unsigned long range_basek, range_sizek;
983 unsigned long chunk_sizek;
984 unsigned long gran_sizek;
990 chunk_sizek = state->chunk_sizek;
991 gran_sizek = state->gran_sizek;
993 /* align to the gran size, to prevent small blocks from using up MTRRs */
994 range_basek = ALIGN(state->range_startk, gran_sizek);
995 if ((range_basek > basek) && basek)
997 state->range_sizek -= (range_basek - state->range_startk);
998 range_sizek = ALIGN(state->range_sizek, gran_sizek);
1000 while (range_sizek > state->range_sizek) {
1001 range_sizek -= gran_sizek;
1005 state->range_sizek = range_sizek;
1007 /* try to append some small hole */
1008 range0_basek = state->range_startk;
1009 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
1012 if (range0_sizek == state->range_sizek) {
1014 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
1016 (range0_basek + state->range_sizek)<<10);
1017 state->reg = range_to_mtrr(state->reg, range0_basek,
1018 state->range_sizek, MTRR_TYPE_WRBACK);
1022 /* only cut back when it is not the last range */
1024 while (range0_basek + range0_sizek > (basek + sizek)) {
1025 if (range0_sizek >= chunk_sizek)
1026 range0_sizek -= chunk_sizek;
1036 range_basek = range0_basek + range0_sizek;
1038 /* one hole in the middle */
1039 if (range_basek > basek && range_basek <= (basek + sizek))
1040 second_sizek = range_basek - basek;
1042 if (range0_sizek > state->range_sizek) {
1044 /* one hole in the middle or at the end */
1045 hole_sizek = range0_sizek - state->range_sizek - second_sizek;
1047 /* hole size should be less than half of range0 size */
1048 if (hole_sizek >= (range0_sizek >> 1) &&
1049 range0_sizek >= chunk_sizek) {
1050 range0_sizek -= chunk_sizek;
1060 printk(KERN_DEBUG "range0: %016lx - %016lx\n",
1062 (range0_basek + range0_sizek)<<10);
1063 state->reg = range_to_mtrr(state->reg, range0_basek,
1064 range0_sizek, MTRR_TYPE_WRBACK);
1067 if (range0_sizek < state->range_sizek) {
1068 /* need to handle the left-over range */
1069 range_sizek = state->range_sizek - range0_sizek;
1072 printk(KERN_DEBUG "range: %016lx - %016lx\n",
1074 (range_basek + range_sizek)<<10);
1075 state->reg = range_to_mtrr(state->reg, range_basek,
1076 range_sizek, MTRR_TYPE_WRBACK);
1080 hole_basek = range_basek - hole_sizek - second_sizek;
1082 printk(KERN_DEBUG "hole: %016lx - %016lx\n",
1084 (hole_basek + hole_sizek)<<10);
1085 state->reg = range_to_mtrr(state->reg, hole_basek,
1086 hole_sizek, MTRR_TYPE_UNCACHABLE);
1089 return second_sizek;
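/*
 * Fold a [base_pfn, base_pfn + size_pfn) block into the pending range: merge
 * when the block is contiguous with it (or starts below 1M), otherwise flush
 * the pending range through range_to_mtrr_with_hole() and start a new one.
 */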
1093 set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
1094 unsigned long size_pfn)
1096 unsigned long basek, sizek;
1097 unsigned long second_sizek = 0;
1099 if (state->reg >= num_var_ranges)
1102 basek = base_pfn << (PAGE_SHIFT - 10);
1103 sizek = size_pfn << (PAGE_SHIFT - 10);
1105 /* See if I can merge with the last range */
1106 if ((basek <= 1024) ||
1107 (state->range_startk + state->range_sizek == basek)) {
1108 unsigned long endk = basek + sizek;
1109 state->range_sizek = endk - state->range_startk;
1112 /* Write the range mtrrs */
1113 if (state->range_sizek != 0)
1114 second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
1116 /* Allocate an msr */
1117 state->range_startk = basek + second_sizek;
1118 state->range_sizek = sizek - second_sizek;
1121 /* minimum size of an mtrr block that can take a hole */
1122 static u64 mtrr_chunk_size __initdata = (256ULL<<20);
1124 static int __init parse_mtrr_chunk_size_opt(char *p)
1128 mtrr_chunk_size = memparse(p, &p);
1131 early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
1133 /* granularity of an mtrr block */
1134 static u64 mtrr_gran_size __initdata;
1136 static int __init parse_mtrr_gran_size_opt(char *p)
1140 mtrr_gran_size = memparse(p, &p);
1143 early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
1145 static int nr_mtrr_spare_reg __initdata =
1146 CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
1148 static int __init parse_mtrr_spare_reg(char *arg)
1151 nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
1155 early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
1158 x86_setup_var_mtrrs(struct res_range *range, int nr_range,
1159 u64 chunk_size, u64 gran_size)
1161 struct var_mtrr_state var_state;
1165 var_state.range_startk = 0;
1166 var_state.range_sizek = 0;
1168 var_state.chunk_sizek = chunk_size >> 10;
1169 var_state.gran_sizek = gran_size >> 10;
1171 memset(range_state, 0, sizeof(range_state));
1173 /* Write the range etc */
1174 for (i = 0; i < nr_range; i++)
1175 set_var_mtrr_range(&var_state, range[i].start,
1176 range[i].end - range[i].start + 1);
1178 /* Write the last range */
1179 if (var_state.range_sizek != 0)
1180 range_to_mtrr_with_hole(&var_state, 0, 0);
1182 num_reg = var_state.reg;
1183 /* Clear out the extra MTRRs */
1184 while (var_state.reg < num_var_ranges) {
1185 save_var_mtrr(var_state.reg, 0, 0, 0);
1192 struct mtrr_cleanup_result {
1193 unsigned long gran_sizek;
1194 unsigned long chunk_sizek;
1195 unsigned long lose_cover_sizek;
1196 unsigned int num_reg;
1201 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
1202 * chunk size: gran_size, ..., 2G
1203 * so we need (1+16)*8 = 136 result slots
1205 #define NUM_RESULT 136
1206 #define PSHIFT (PAGE_SHIFT - 10)
1208 static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
1209 static struct res_range __initdata range_new[RANGE_NUM];
1210 static unsigned long __initdata min_loss_pfn[RANGE_NUM];
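/*
 * Try to replace the BIOS variable-MTRR layout with an equivalent one that
 * needs fewer registers: snapshot the current layout, rebuild the WB coverage
 * map, then either apply the user-supplied mtrr_chunk_size/mtrr_gran_size or
 * search gran sizes 64K..2G and chunk sizes up to 2G for the combination that
 * loses the least coverage while keeping mtrr_spare_reg_nr registers free.
 */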
1212 static int __init mtrr_cleanup(unsigned address_bits)
1214 unsigned long extra_remove_base, extra_remove_size;
1215 unsigned long base, size, def, dummy;
1217 int nr_range, nr_range_new;
1218 u64 chunk_size, gran_size;
1219 unsigned long range_sums, range_sums_new;
1224 /* one extra slot for the all-zero (disabled) entries */
1225 int num[MTRR_NUM_TYPES + 1];
1227 if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
1229 rdmsr(MTRRdefType_MSR, def, dummy);
1231 if (def != MTRR_TYPE_UNCACHABLE)
1234 /* get it and store it aside */
1235 memset(range_state, 0, sizeof(range_state));
1236 for (i = 0; i < num_var_ranges; i++) {
1237 mtrr_if->get(i, &base, &size, &type);
1238 range_state[i].base_pfn = base;
1239 range_state[i].size_pfn = size;
1240 range_state[i].type = type;
1243 /* count the entries of each type */
1244 memset(num, 0, sizeof(num));
1245 for (i = 0; i < num_var_ranges; i++) {
1246 type = range_state[i].type;
1247 size = range_state[i].size_pfn;
1248 if (type >= MTRR_NUM_TYPES)
1251 type = MTRR_NUM_TYPES;
1252 if (type == MTRR_TYPE_WRPROT)
1253 type = MTRR_TYPE_UNCACHABLE;
1257 /* check if we got UC entries */
1258 if (!num[MTRR_TYPE_UNCACHABLE])
1261 /* check if we only had WB and UC */
1262 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
1263 num_var_ranges - num[MTRR_NUM_TYPES])
1266 /* print original var MTRRs at first, for debugging: */
1267 printk(KERN_DEBUG "original variable MTRRs\n");
1268 for (i = 0; i < num_var_ranges; i++) {
1269 char start_factor = 'K', size_factor = 'K';
1270 unsigned long start_base, size_base;
1272 size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
1276 size_base = to_size_factor(size_base, &size_factor),
1277 start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
1278 start_base = to_size_factor(start_base, &start_factor),
1279 type = range_state[i].type;
1281 printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
1282 i, start_base, start_factor,
1283 size_base, size_factor,
1284 (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
1285 ((type == MTRR_TYPE_WRPROT) ? "WP" :
1286 ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
1290 memset(range, 0, sizeof(range));
1291 extra_remove_size = 0;
1292 extra_remove_base = 1 << (32 - PAGE_SHIFT);
1295 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
1296 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
1299 * [0, 1M) should always be covered by a var mtrr with WB,
1300 * and for that region the fixed mtrrs take effect before the var mtrrs
1302 nr_range = add_range_with_merge(range, nr_range, 0,
1303 (1ULL<<(20 - PAGE_SHIFT)) - 1);
1304 /* sort the ranges */
1305 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
1307 range_sums = sum_ranges(range, nr_range);
1308 printk(KERN_INFO "total RAM coverred: %ldM\n",
1309 range_sums >> (20 - PAGE_SHIFT));
1311 if (mtrr_chunk_size && mtrr_gran_size) {
1313 char gran_factor, chunk_factor, lose_factor;
1314 unsigned long gran_base, chunk_base, lose_base;
1317 /* convert ranges to var ranges state */
1318 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
1321 /* we got a new setting in range_state, check it */
1322 memset(range_new, 0, sizeof(range_new));
1323 nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
1326 range_sums_new = sum_ranges(range_new, nr_range_new);
1329 result[i].chunk_sizek = mtrr_chunk_size >> 10;
1330 result[i].gran_sizek = mtrr_gran_size >> 10;
1331 result[i].num_reg = num_reg;
1332 if (range_sums < range_sums_new) {
1333 result[i].lose_cover_sizek =
1334 (range_sums_new - range_sums) << PSHIFT;
1337 result[i].lose_cover_sizek =
1338 (range_sums - range_sums_new) << PSHIFT;
1340 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1341 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1342 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1343 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1344 result[i].bad?"*BAD*":" ",
1345 gran_base, gran_factor, chunk_base, chunk_factor);
1346 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1347 result[i].num_reg, result[i].bad?"-":"",
1348 lose_base, lose_factor);
1349 if (!result[i].bad) {
1350 set_var_mtrr_all(address_bits);
1353 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
1354 "will find optimal one\n");
1356 memset(result, 0, sizeof(result[0]));
1360 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
1361 memset(result, 0, sizeof(result));
1362 for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
1364 unsigned long gran_base;
1367 gran_base = to_size_factor(gran_size >> 10, &gran_factor);
1369 for (chunk_size = gran_size; chunk_size < (1ULL<<32);
1375 unsigned long chunk_base;
1377 chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
1378 printk(KERN_INFO "\n");
1379 printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
1380 gran_base, gran_factor, chunk_base, chunk_factor);
1382 if (i >= NUM_RESULT)
1385 /* convert ranges to var ranges state */
1386 num_reg = x86_setup_var_mtrrs(range, nr_range,
1387 chunk_size, gran_size);
1389 /* we got a new setting in range_state, check it */
1390 memset(range_new, 0, sizeof(range_new));
1391 nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
1392 extra_remove_base, extra_remove_size);
1393 range_sums_new = sum_ranges(range_new, nr_range_new);
1395 result[i].chunk_sizek = chunk_size >> 10;
1396 result[i].gran_sizek = gran_size >> 10;
1397 result[i].num_reg = num_reg;
1398 if (range_sums < range_sums_new) {
1399 result[i].lose_cover_sizek =
1400 (range_sums_new - range_sums) << PSHIFT;
1403 result[i].lose_cover_sizek =
1404 (range_sums - range_sums_new) << PSHIFT;
1406 /* double check it */
1407 if (!result[i].bad && !result[i].lose_cover_sizek) {
1408 if (nr_range_new != nr_range ||
1409 memcmp(range, range_new, sizeof(range)))
1413 if (!result[i].bad && (range_sums - range_sums_new <
1414 min_loss_pfn[num_reg])) {
1415 min_loss_pfn[num_reg] =
1416 range_sums - range_sums_new;
1423 for (i = 0; i < NUM_RESULT; i++) {
1424 char gran_factor, chunk_factor, lose_factor;
1425 unsigned long gran_base, chunk_base, lose_base;
1427 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1428 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1429 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1430 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1431 result[i].bad?"*BAD*":" ",
1432 gran_base, gran_factor, chunk_base, chunk_factor);
1433 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1434 result[i].num_reg, result[i].bad?"-":"",
1435 lose_base, lose_factor);
1438 /* try to find the optimal index */
1439 if (nr_mtrr_spare_reg >= num_var_ranges)
1440 nr_mtrr_spare_reg = num_var_ranges - 1;
1442 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
1443 if (!min_loss_pfn[i])
1448 if (num_reg_good != -1) {
1449 for (i = 0; i < NUM_RESULT; i++) {
1450 if (!result[i].bad &&
1451 result[i].num_reg == num_reg_good &&
1452 !result[i].lose_cover_sizek) {
1459 if (index_good != -1) {
1460 char gran_factor, chunk_factor, lose_factor;
1461 unsigned long gran_base, chunk_base, lose_base;
1463 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
1465 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1466 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1467 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1468 printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
1469 gran_base, gran_factor, chunk_base, chunk_factor);
1470 printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
1471 result[i].num_reg, lose_base, lose_factor);
1472 /* convert ranges to var ranges state */
1473 chunk_size = result[i].chunk_sizek;
1475 gran_size = result[i].gran_sizek;
1478 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
1480 set_var_mtrr_all(address_bits);
1484 printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
1485 printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
1490 static int __init mtrr_cleanup(unsigned address_bits)
1496 static int __initdata changed_by_mtrr_cleanup;
1498 static int disable_mtrr_trim;
1500 static int __init disable_mtrr_trim_setup(char *str)
1502 disable_mtrr_trim = 1;
1505 early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
1508 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
1509 * for memory >4GB. Check for that here.
1510 * Note this won't check whether the MTRRs below 4GB (where the magic bit
1511 * doesn't apply) are wrong, but so far we don't know of any such case in the wild.
1513 #define Tom2Enabled (1U << 21)
1514 #define Tom2ForceMemTypeWB (1U << 22)
1516 int __init amd_special_default_mtrr(void)
1520 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1522 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
1524 /* In case some hypervisor doesn't pass SYSCFG through */
1525 if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
1528 * Memory between 4GB and top of mem is forced WB by this magic bit.
1529 * Reserved before K8RevF, but should be zero there.
1531 if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
1532 (Tom2Enabled | Tom2ForceMemTypeWB))
1537 static u64 __init real_trim_memory(unsigned long start_pfn,
1538 unsigned long limit_pfn)
1540 u64 trim_start, trim_size;
1541 trim_start = start_pfn;
1542 trim_start <<= PAGE_SHIFT;
1543 trim_size = limit_pfn;
1544 trim_size <<= PAGE_SHIFT;
1545 trim_size -= trim_start;
1547 return e820_update_range(trim_start, trim_size, E820_RAM,
1551 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
1552 * @end_pfn: ending page frame number
1554 * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
1555 * memory configurations. This routine checks that the highest MTRR matches
1556 * the end of memory, to make sure the MTRRs having a write back type cover
1557 * all of the memory the kernel is intending to use. If not, it'll trim any
1558 * memory off the end by adjusting end_pfn, removing it from the kernel's
1559 * allocation pools, and warning the user with an obnoxious message.
1561 int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1563 unsigned long i, base, size, highest_pfn = 0, def, dummy;
1566 u64 total_trim_size;
1568 /* one extra slot for the all-zero (disabled) entries */
1569 int num[MTRR_NUM_TYPES + 1];
1571 * Make sure we only trim uncachable memory on machines that
1572 * support the Intel MTRR architecture:
1574 if (!is_cpu(INTEL) || disable_mtrr_trim)
1576 rdmsr(MTRRdefType_MSR, def, dummy);
1578 if (def != MTRR_TYPE_UNCACHABLE)
1581 /* get it and store it aside */
1582 memset(range_state, 0, sizeof(range_state));
1583 for (i = 0; i < num_var_ranges; i++) {
1584 mtrr_if->get(i, &base, &size, &type);
1585 range_state[i].base_pfn = base;
1586 range_state[i].size_pfn = size;
1587 range_state[i].type = type;
1590 /* Find highest cached pfn */
1591 for (i = 0; i < num_var_ranges; i++) {
1592 type = range_state[i].type;
1593 if (type != MTRR_TYPE_WRBACK)
1595 base = range_state[i].base_pfn;
1596 size = range_state[i].size_pfn;
1597 if (highest_pfn < base + size)
1598 highest_pfn = base + size;
1601 /* kvm/qemu doesn't have the mtrrs set right, don't trim all of memory */
1603 WARN(!kvm_para_available(), KERN_WARNING
1604 "WARNING: strange, CPU MTRRs all blank?\n");
1608 /* count the entries of each type */
1609 memset(num, 0, sizeof(num));
1610 for (i = 0; i < num_var_ranges; i++) {
1611 type = range_state[i].type;
1612 if (type >= MTRR_NUM_TYPES)
1614 size = range_state[i].size_pfn;
1616 type = MTRR_NUM_TYPES;
1620 /* no entry for WB? */
1621 if (!num[MTRR_TYPE_WRBACK])
1624 /* check if we only had WB and UC */
1625 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
1626 num_var_ranges - num[MTRR_NUM_TYPES])
1629 memset(range, 0, sizeof(range));
1632 range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
1633 range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
1634 if (highest_pfn < range[nr_range].end + 1)
1635 highest_pfn = range[nr_range].end + 1;
1638 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
1640 total_trim_size = 0;
1641 /* check the head */
1643 total_trim_size += real_trim_memory(0, range[0].start);
1644 /* check the holes */
1645 for (i = 0; i < nr_range - 1; i++) {
1646 if (range[i].end + 1 < range[i+1].start)
1647 total_trim_size += real_trim_memory(range[i].end + 1,
1652 if (range[i].end + 1 < end_pfn)
1653 total_trim_size += real_trim_memory(range[i].end + 1,
1656 if (total_trim_size) {
1657 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
1658 " all of memory, losing %lluMB of RAM.\n",
1659 total_trim_size >> 20);
1661 if (!changed_by_mtrr_cleanup)
1664 printk(KERN_INFO "update e820 for mtrr\n");
1674 * mtrr_bp_init - initialize mtrrs on the boot CPU
1676 * This needs to be called early; before any of the other CPUs are
1677 * initialized (i.e. before smp_init()).
1680 void __init mtrr_bp_init(void)
1688 mtrr_if = &generic_mtrr_ops;
1689 size_or_mask = 0xff000000; /* 36 bits */
1690 size_and_mask = 0x00f00000;
1693 /* This is an AMD specific MSR, but we assume (hope?) that
1694 Intel will implement it too when they extend the address
1696 if (cpuid_eax(0x80000000) >= 0x80000008) {
1697 phys_addr = cpuid_eax(0x80000008) & 0xff;
1698 /* CPUID workaround for Intel 0F33/0F34 CPU */
1699 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
1700 boot_cpu_data.x86 == 0xF &&
1701 boot_cpu_data.x86_model == 0x3 &&
1702 (boot_cpu_data.x86_mask == 0x3 ||
1703 boot_cpu_data.x86_mask == 0x4))
1706 size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
1707 size_and_mask = ~size_or_mask & 0xfffff00000ULL;
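/*
 * e.g. a hypothetical phys_addr of 36 bits with 4K pages gives
 * size_or_mask = ~((1ULL << 24) - 1) = 0xffffffffff000000 and
 * size_and_mask = ~size_or_mask & 0xfffff00000ULL = 0x0000000000f00000.
 */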
1708 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
1709 boot_cpu_data.x86 == 6) {
1710 /* VIA C* family have Intel style MTRRs, but
1711 don't support PAE */
1712 size_or_mask = 0xfff00000; /* 32 bits */
1717 switch (boot_cpu_data.x86_vendor) {
1718 case X86_VENDOR_AMD:
1719 if (cpu_has_k6_mtrr) {
1720 /* Pre-Athlon (K6) AMD CPU MTRRs */
1721 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
1722 size_or_mask = 0xfff00000; /* 32 bits */
1726 case X86_VENDOR_CENTAUR:
1727 if (cpu_has_centaur_mcr) {
1728 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
1729 size_or_mask = 0xfff00000; /* 32 bits */
1733 case X86_VENDOR_CYRIX:
1734 if (cpu_has_cyrix_arr) {
1735 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
1736 size_or_mask = 0xfff00000; /* 32 bits */
1746 set_num_var_ranges();
1751 if (mtrr_cleanup(phys_addr)) {
1752 changed_by_mtrr_cleanup = 1;
1760 void mtrr_ap_init(void)
1762 unsigned long flags;
1764 if (!mtrr_if || !use_intel())
1767 * Ideally we should hold mtrr_mutex here to avoid MTRR entries being changed,
1768 * but this routine is called at CPU boot time and holding the lock there
1769 * breaks it. This routine is called in two cases: 1. very early during
1770 * software resume, when there are absolutely no MTRR entry changes; and
1771 * 2. at CPU hot-add time. We let mtrr_add/del_page hold the cpu hotplug lock
1772 * to prevent MTRR entry changes.
1774 local_irq_save(flags);
1778 local_irq_restore(flags);
1782 * Save current fixed-range MTRR state of the BSP
1784 void mtrr_save_state(void)
1786 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
1789 static int __init mtrr_init_finialize(void)
1794 if (!changed_by_mtrr_cleanup)
1797 /* The CPUs that lack MTRRs don't seem to support SMP. They have
1798 * specific drivers, so we use a tricky method to support
1799 * suspend/resume for them.
1800 * TBD: is there any system with such a CPU that supports
1801 * suspend/resume? If not, we should remove this code.
1803 sysdev_driver_register(&cpu_sysdev_class,
1804 &mtrr_sysdev_driver);
1808 subsys_initcall(mtrr_init_finialize);