/*
 * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
 * M (part of the Centrino chipset).
 *
 * Since the original Pentium M, most new Intel CPUs support Enhanced
 * SpeedStep.
 *
 * Despite the "SpeedStep" in the name, this is almost entirely unlike
 * traditional SpeedStep.
 *
 * Modelled on speedstep.c
 *
 * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>	/* current */
#include <linux/delay.h>
#include <linux/compiler.h>
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <acpi/processor.h>
#endif

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#define PFX		"speedstep-centrino: "
#define MAINTAINER	"cpufreq@lists.linux.org.uk"

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
struct cpu_id {
	__u8	x86;		/* CPU family */
	__u8	x86_model;	/* model */
	__u8	x86_mask;	/* stepping */
};

enum { CPU_BANIAS, CPU_DOTHAN_A1, CPU_DOTHAN_A2, CPU_DOTHAN_B0, CPU_MP4HT_D0, CPU_MP4HT_E0 };

static const struct cpu_id cpu_ids[] = {
	[CPU_BANIAS]	= { 6,  9, 5 },
	[CPU_DOTHAN_A1]	= { 6, 13, 1 },
	[CPU_DOTHAN_A2]	= { 6, 13, 2 },
	[CPU_DOTHAN_B0]	= { 6, 13, 6 },
	[CPU_MP4HT_D0]	= { 15, 3, 4 },
	[CPU_MP4HT_E0]	= { 15, 4, 1 },
};
#define N_IDS	ARRAY_SIZE(cpu_ids)
struct cpu_model {
	const struct cpu_id		*cpu_id;
	const char			*model_name;
	unsigned			max_freq;	/* max clock in kHz */
	struct cpufreq_frequency_table	*op_points;	/* clock/voltage pairs */
};

static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x);

/* Operating points for current CPU */
static struct cpu_model *centrino_model[NR_CPUS];
static const struct cpu_id *centrino_cpu[NR_CPUS];

static struct cpufreq_driver centrino_driver;
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE

/* Computes the correct form for IA32_PERF_CTL MSR for a particular
   frequency/voltage operating point; frequency in MHz, volts in mV.
   This is stored as "index" in the structure. */
#define OP(mhz, mv)						\
	{							\
		.frequency = (mhz) * 1000,			\
		.index = (((mhz)/100) << 8) | ((mv - 700) / 16)	\
	}
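/*
 * Worked example of the encoding above (the 1300/1388 pair is purely
 * illustrative and not taken from the tables below): OP(1300, 1388)
 * expands to
 *	.frequency = 1300 * 1000 = 1300000 (kHz)
 *	.index     = ((1300/100) << 8) | ((1388 - 700) / 16)
 *	           = (13 << 8) | 43 = 0x0d2b
 * so the bus ratio ends up in bits 15:8 and the voltage ID in bits
 * 7:0 of the value later written to IA32_PERF_CTL.
 */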
/*
 * These voltage tables were derived from the Intel Pentium M
 * datasheet, document 25261202.pdf, Table 5. I have verified they
 * are consistent with my IBM ThinkPad X31, which has a 1.3GHz
 * Pentium M.
 */
/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
static struct cpufreq_frequency_table banias_900[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
static struct cpufreq_frequency_table banias_1000[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
static struct cpufreq_frequency_table banias_1100[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
static struct cpufreq_frequency_table banias_1200[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Intel Pentium M processor 1.30GHz (Banias) */
static struct cpufreq_frequency_table banias_1300[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Intel Pentium M processor 1.40GHz (Banias) */
static struct cpufreq_frequency_table banias_1400[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Intel Pentium M processor 1.50GHz (Banias) */
static struct cpufreq_frequency_table banias_1500[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Intel Pentium M processor 1.60GHz (Banias) */
static struct cpufreq_frequency_table banias_1600[] =
	{ .frequency = CPUFREQ_TABLE_END }

/* Intel Pentium M processor 1.70GHz (Banias) */
static struct cpufreq_frequency_table banias_1700[] =
	{ .frequency = CPUFREQ_TABLE_END }
#define _BANIAS(cpuid, max, name)	\
{	.cpu_id		= cpuid,	\
	.model_name	= "Intel(R) Pentium(R) M processor " name "MHz", \
	.max_freq	= (max)*1000,	\
	.op_points	= banias_##max,	\
}
#define BANIAS(max)	_BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
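/*
 * For reference (assuming _BANIAS fills .cpu_id from its first
 * argument, as the struct cpu_model layout suggests), BANIAS(1300)
 * expands to an initializer along the lines of
 *	{ .cpu_id	= &cpu_ids[CPU_BANIAS],
 *	  .model_name	= "Intel(R) Pentium(R) M processor 1300MHz",
 *	  .max_freq	= 1300000,
 *	  .op_points	= banias_1300 },
 * and the stringified frequency must match the CPU's reported model
 * name exactly, since centrino_cpu_init_table() below does a plain
 * strcmp() against cpu->x86_model_id.
 */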
/* CPU models, their operating frequency range, and freq/voltage
   operating points */
static struct cpu_model models[] =
{
	_BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
	BANIAS(1000),
	BANIAS(1100),
	BANIAS(1200),
	BANIAS(1300),
	BANIAS(1400),
	BANIAS(1500),
	BANIAS(1600),
	BANIAS(1700),

	/* NULL model_name is a wildcard */
	{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
	{ &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
	{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
	{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
	{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
	{ NULL, }
};
static int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
	struct cpu_model *model;

	for (model = models; model->cpu_id != NULL; model++)
		if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
		    (model->model_name == NULL ||
		     strcmp(cpu->x86_model_id, model->model_name) == 0))
			break;

	if (model->cpu_id == NULL) {
		/* No match at all */
		dprintk("no support for CPU model \"%s\": "
			"send /proc/cpuinfo to " MAINTAINER "\n",
			cpu->x86_model_id);
		return -ENOENT;
	}

	if (model->op_points == NULL) {
		/* Matched a non-match */
		dprintk("no table support for CPU model \"%s\"\n",
			cpu->x86_model_id);
#ifndef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
		dprintk("try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
#endif
		return -ENOENT;
	}

	centrino_model[policy->cpu] = model;

	dprintk("found \"%s\": max frequency: %dkHz\n",
		model->model_name, model->max_freq);

	return 0;
}

#else
static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; }
#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x)
{
	if ((c->x86 == x->x86) &&
	    (c->x86_model == x->x86_model) &&
	    (c->x86_mask == x->x86_mask))
		return 1;
	return 0;
}
/* To be called only after centrino_model is initialized */
static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
{
	int i;
	/*
	 * Extract clock in kHz from PERF_CTL value
	 * for centrino, as some DSDTs are buggy.
	 * Ideally, this can be done using the acpi_data structure.
	 */
	if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) ||
	    (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) ||
	    (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) {
		msr = (msr >> 8) & 0xff;
		return msr * 100000;	/* bus ratio x 100 MHz FSB */
	}

	if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points))
		return 0;

	msr &= 0xffff;
	for (i = 0; centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == centrino_model[cpu]->op_points[i].index)
			return centrino_model[cpu]->op_points[i].frequency;
	}
	if (failsafe)
		return centrino_model[cpu]->op_points[i-1].frequency;
	return 0;
}
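/*
 * Decode sketch, with a made-up register value for illustration: on a
 * non-Banias part whose table carries an entry with .index = 0x0d2b and
 * .frequency = 1300000, a PERF_STATUS/PERF_CTL value whose low 16 bits
 * are 0x0d2b is reported as 1300000 kHz; a value matching no entry
 * yields 0, or the last table entry when failsafe is set.
 */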
/* Return the current CPU frequency in kHz */
static unsigned int get_cur_freq(unsigned int cpu)
{
	unsigned l, h;
	unsigned clock_freq;
	cpumask_t saved_mask;

	saved_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (smp_processor_id() != cpu)
		return 0;

	rdmsr(MSR_IA32_PERF_STATUS, l, h);
	clock_freq = extract_clock(l, cpu, 0);

	if (unlikely(clock_freq == 0)) {
		/*
		 * On some CPUs, we can see transient MSR values (which are
		 * not present in _PSS) while the CPU is doing an automatic
		 * P-state transition (such as TM2). Get the last frequency
		 * set in PERF_CTL instead.
		 */
		rdmsr(MSR_IA32_PERF_CTL, l, h);
		clock_freq = extract_clock(l, cpu, 1);
	}

	set_cpus_allowed(current, saved_mask);
	return clock_freq;
}
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];

/*
 * centrino_cpu_early_init_acpi - do the preregistering with the ACPI
 * P-States library.  Before the actual init, we need to do the
 * _PSD-related setup whenever the BIOS supports it; that is handled
 * by this early_init routine.
 */
static int centrino_cpu_early_init_acpi(void)
{
	unsigned int i, j;
	struct acpi_processor_performance *data;
	for_each_possible_cpu(i) {
		data = kzalloc(sizeof(struct acpi_processor_performance),
			       GFP_KERNEL);
		if (!data) {
			/* roll back what has been allocated so far */
			for_each_possible_cpu(j) {
				kfree(acpi_perf_data[j]);
				acpi_perf_data[j] = NULL;
			}
			return -ENOMEM;
		}
		acpi_perf_data[i] = data;
	}
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up
 * in hardware or handling it in firmware, without informing the OS.
 * If that goes undetected, the CPU may end up running at a speed other
 * than the one the OS asked for. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
/*
 * centrino_cpu_init_acpi - register with the ACPI P-States library
 *
 * Register with the ACPI P-States library (part of drivers/acpi/processor.c)
 * in order to determine correct frequency and voltage pairings by reading
 * the _PSS of the ACPI DSDT or SSDT tables.
 */
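/*
 * For reference, each _PSS package entry has the form
 *	{ CoreFrequency (MHz), Power (mW), TransitionLatency (us),
 *	  BusMasterLatency (us), Control, Status }
 * and reaches this driver through acpi_processor_performance as
 * p->states[i].core_frequency, .control and .status. The Control value
 * is what eventually gets written to IA32_PERF_CTL, and the code below
 * cross-checks it against the advertised core frequency via
 * extract_clock().
 */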
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
	unsigned long cur_freq;
	unsigned int i;
	unsigned int cpu = policy->cpu;
	struct acpi_processor_performance *p;

	p = acpi_perf_data[cpu];

	/* register with ACPI core */
	if (acpi_processor_register_performance(p, cpu)) {
		dprintk(PFX "obtaining ACPI data failed\n");

	policy->shared_type = p->shared_type;
	/*
	 * Only expose the dependency through policy->cpus when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		policy->cpus = p->shared_cpu_map;

	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		policy->cpus = cpu_core_map[cpu];

	/* verify the acpi_data */
	if (p->state_count <= 1) {
		dprintk("No P-States\n");

	if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		dprintk("Invalid control/status registers (%x - %x)\n",
			p->control_register.space_id, p->status_register.space_id);

	for (i = 0; i < p->state_count; i++) {
		if (p->states[i].control != p->states[i].status) {
			dprintk("Different control (%llu) and status values (%llu)\n",
				p->states[i].control, p->states[i].status);

		if (!p->states[i].core_frequency) {
			dprintk("Zero core frequency for state %u\n", i);

		if (p->states[i].core_frequency > p->states[0].core_frequency) {
			dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n",
				i, p->states[i].core_frequency, p->states[0].core_frequency);
			p->states[i].core_frequency = 0;

	centrino_model[cpu] = kzalloc(sizeof(struct cpu_model), GFP_KERNEL);
	if (!centrino_model[cpu]) {

	centrino_model[cpu]->model_name = NULL;
	centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
	centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
						 (p->state_count + 1), GFP_KERNEL);
	if (!centrino_model[cpu]->op_points) {

	for (i = 0; i < p->state_count; i++) {
		centrino_model[cpu]->op_points[i].index = p->states[i].control;
		centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
		dprintk("adding state %i with frequency %u and control value %04x\n",
			i, centrino_model[cpu]->op_points[i].frequency,
			centrino_model[cpu]->op_points[i].index);

	centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;

	cur_freq = get_cur_freq(cpu);

	for (i = 0; i < p->state_count; i++) {
		if (!p->states[i].core_frequency) {
			dprintk("skipping state %u\n", i);
			centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;

		if (extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0) !=
		    (centrino_model[cpu]->op_points[i].frequency)) {
			dprintk("Invalid encoded frequency (%u vs. %u)\n",
				extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0),
				centrino_model[cpu]->op_points[i].frequency);

		if (cur_freq == centrino_model[cpu]->op_points[i].frequency)

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	kfree(centrino_model[cpu]->op_points);

	kfree(centrino_model[cpu]);

	acpi_processor_unregister_performance(p, cpu);
	dprintk(PFX "invalid ACPI data\n");
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI */
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
	unsigned freq;
	unsigned l, h;
	int ret;
	int i;

	/* Only Intel makes Enhanced SpeedStep-capable CPUs */
	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;

	if (centrino_cpu_init_acpi(policy)) {
		if (policy->cpu != 0)
			return -ENODEV;

		for (i = 0; i < N_IDS; i++)
			if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
				break;
		if (i != N_IDS)
			centrino_cpu[policy->cpu] = &cpu_ids[i];

		if (!centrino_cpu[policy->cpu]) {
			dprintk("found unsupported CPU with "
				"Enhanced SpeedStep: send /proc/cpuinfo to "
				MAINTAINER "\n");
			return -ENODEV;
		}

		if (centrino_cpu_init_table(policy))
			return -ENODEV;
	}

	/* Check to see if Enhanced SpeedStep is enabled, and try to
	   enable it if not. */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	if (!(l & (1<<16))) {
		l |= (1<<16);
		dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
		wrmsr(MSR_IA32_MISC_ENABLE, l, h);

		/* check to see if it stuck */
		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
		if (!(l & (1<<16))) {
			printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
			return -ENODEV;
		}
	}

	freq = get_cur_freq(policy->cpu);
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	policy->cpuinfo.transition_latency = 10000;	/* 10 uS transition latency */
	policy->cur = freq;

	dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur);

	ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points);
	if (ret)
		return ret;

	cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu);
	return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;

	if (!centrino_model[cpu])
		return -ENODEV;

	cpufreq_frequency_table_put_attr(cpu);

#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
	if (!centrino_model[cpu]->model_name) {
		static struct acpi_processor_performance *p;

		if (acpi_perf_data[cpu]) {
			p = acpi_perf_data[cpu];
			dprintk("unregistering and freeing ACPI data\n");
			acpi_processor_unregister_performance(p, cpu);
			kfree(centrino_model[cpu]->op_points);
			kfree(centrino_model[cpu]);
		}
	}
#endif

	centrino_model[cpu] = NULL;
	return 0;
}
/**
 * centrino_verify - verifies a new CPUFreq policy
 * @policy: new policy
 *
 * Limits must be within this model's frequency range, with at least
 * one border included.
 */
static int centrino_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points);
}
/**
 * centrino_target - set a new CPUFreq policy
 * @policy: new policy
 * @target_freq: the target frequency
 * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
 *
 * Sets a new CPUFreq policy.
 */
static int centrino_target (struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int	newstate = 0;
	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
	struct cpufreq_freqs	freqs;
	cpumask_t	online_policy_cpus;
	cpumask_t	saved_mask;
	cpumask_t	set_mask;
	cpumask_t	covered_cpus;
	int		retval = 0;
	unsigned int	j, k, first_cpu, tmp;

	if (unlikely(centrino_model[cpu] == NULL))
		return -ENODEV;

	if (unlikely(cpufreq_frequency_table_target(policy,
			centrino_model[cpu]->op_points,
			target_freq, relation, &newstate))) {

#ifdef CONFIG_HOTPLUG_CPU
	/* cpufreq holds the hotplug lock, so we are safe from here on */
	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
	online_policy_cpus = policy->cpus;
#endif

	saved_mask = current->cpus_allowed;

	cpus_clear(covered_cpus);
	for_each_cpu_mask(j, online_policy_cpus) {
		/*
		 * Support for SMP systems.
		 * Make sure we are running on the CPU that wants to change
		 * the frequency.
		 */
		cpus_clear(set_mask);
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			cpus_or(set_mask, set_mask, online_policy_cpus);
		cpu_set(j, set_mask);

		set_cpus_allowed(current, set_mask);
		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
			dprintk("couldn't limit to CPUs in this domain\n");

			/* We haven't started the transition yet. */

		msr = centrino_model[cpu]->op_points[newstate].index;

		rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
		if (msr == (oldmsr & 0xffff)) {
			dprintk("no change needed - msr was and needs "
				"to be %x\n", oldmsr);

		freqs.old = extract_clock(oldmsr, cpu, 0);
		freqs.new = extract_clock(msr, cpu, 0);

		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
			target_freq, freqs.old, freqs.new, msr);

		for_each_cpu_mask(k, online_policy_cpus) {
			freqs.cpu = k;
			cpufreq_notify_transition(&freqs,
						  CPUFREQ_PRECHANGE);
		}

		/* all but the 16 LSB are reserved, treat them with care */
		oldmsr &= ~0xffff;
		msr &= 0xffff;
		oldmsr |= msr;

		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			break;

		cpu_set(j, covered_cpus);
	}

	for_each_cpu_mask(k, online_policy_cpus) {
		freqs.cpu = k;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	if (unlikely(retval)) {
		/*
		 * We have failed halfway through the frequency change.
		 * We have sent callbacks to policy->cpus and
		 * MSRs have already been written on covered_cpus.
		 * Best-effort undo:
		 */
		if (!cpus_empty(covered_cpus)) {
			for_each_cpu_mask(j, covered_cpus) {
				set_cpus_allowed(current, cpumask_of_cpu(j));
				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
			}
		}

		freqs.new = freqs.old;
		for_each_cpu_mask(j, online_policy_cpus) {
			freqs.cpu = j;
			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
		}
	}

	set_cpus_allowed(current, saved_mask);
static struct freq_attr *centrino_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver centrino_driver = {
	.name		= "centrino", /* should be speedstep-centrino,
					 but there's a 16-char limit */
	.init		= centrino_cpu_init,
	.exit		= centrino_cpu_exit,
	.verify		= centrino_verify,
	.target		= centrino_target,
	.attr		= centrino_attr,
	.owner		= THIS_MODULE,
};
/**
 * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
 *
 * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
 * unsupported devices, -ENOENT if there's no voltage table for this
 * particular CPU model, -EINVAL on problems during initialization,
 * and zero on success.
 *
 * This is quite picky. Not only does the CPU have to advertise the
 * "est" flag in the cpuid capability flags, we look for a specific
 * CPU model and stepping, and we need to have the exact model name in
 * our voltage tables. That is, be paranoid about not releasing
 * someone's valuable magic smoke.
 */
static int __init centrino_init(void)
{
	struct cpuinfo_x86 *cpu = cpu_data;

	if (!cpu_has(cpu, X86_FEATURE_EST))
		return -ENODEV;

	centrino_cpu_early_init_acpi();

	return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
	unsigned int j;
#endif

	cpufreq_unregister_driver(&centrino_driver);

#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
	for_each_possible_cpu(j) {
		kfree(acpi_perf_data[j]);
		acpi_perf_data[j] = NULL;
	}
#endif
}
MODULE_AUTHOR("Jeremy Fitzhardinge <jeremy@goop.org>");
MODULE_DESCRIPTION("Enhanced SpeedStep driver for Intel Pentium M processors.");
MODULE_LICENSE("GPL");

late_initcall(centrino_init);
module_exit(centrino_exit);