/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);

/*
 * _TPC - Throttling Present Capabilities
 */
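/*
 * Note: _TPC evaluates to the index of the lowest-numbered (highest
 * performance) T-state that the platform currently allows OSPM to use;
 * requests for states below this limit are rejected with -EPERM by the
 * set_throttling handlers further down.
 */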
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long tpc = 0;

        if (!pr)
                return -EINVAL;
        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
                }
                return -ENODEV;
        }
        pr->throttling_platform_limit = (int)tpc;
        return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
        int result = 0;
        int throttling_limit;
        int current_state;
        struct acpi_processor_limit *limit;
        int target_state;

        result = acpi_processor_get_platform_limit(pr);
        if (result) {
                /* Throttling Limit is unsupported */
                return result;
        }

        throttling_limit = pr->throttling_platform_limit;
        if (throttling_limit >= pr->throttling.state_count) {
                /* Invalid throttling limit */
                return -EINVAL;
        }

        current_state = pr->throttling.state;
        if (current_state > throttling_limit) {
                /*
                 * The current state already satisfies the _TPC limit, but
                 * OSPM may still move to a lower (less throttled) T-state
                 * for better performance, provided the thermal and user
                 * limits are still respected.
                 */
                limit = &pr->limit;
                target_state = throttling_limit;
                if (limit->thermal.tx > target_state)
                        target_state = limit->thermal.tx;
                if (limit->user.tx > target_state)
                        target_state = limit->user.tx;
        } else if (current_state == throttling_limit) {
                /*
                 * Unnecessary to change the throttling state
                 */
                return 0;
        } else {
                /*
                 * The current state is below the _TPC limit, so it is
                 * forced up to the state defined by
                 * throttling_platform_limit.  The previous state already
                 * met the thermal and user limits, so there is no need to
                 * check them again.
                 */
                target_state = throttling_limit;
        }
        return acpi_processor_set_throttling(pr, target_state);
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
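/*
 * _PTC is expected to return a two-element package, each element a buffer
 * holding a generic register descriptor (struct acpi_ptc_register):
 * element 0 describes the throttling control register, element 1 the
 * throttling status register.  Both are copied into pr->throttling below.
 */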
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
        union acpi_object obj = { 0 };
        struct acpi_processor_throttling *throttling;

        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
                }
                return -ENODEV;
        }

        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = ptc->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX
                       "Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->throttling.control_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        /*
         * status_register
         */

        obj = ptc->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->throttling.status_register, obj.buffer.pointer,
               sizeof(struct acpi_ptc_register));

        throttling = &pr->throttling;

        if ((throttling->control_register.bit_width +
                throttling->control_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
                result = -EFAULT;
                goto end;
        }

        if ((throttling->status_register.bit_width +
                throttling->status_register.bit_offset) > 32) {
                printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
                result = -EFAULT;
                goto end;
        }

      end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSS - Throttling Supported States
 */
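/*
 * Each _TSS entry is a package of five integers, matching
 * struct acpi_processor_tx_tss: frequency percentage, power (milliwatts),
 * transition latency (microseconds), and the control and status values
 * used to enter and verify the state.  Hence the "NNNNN" extraction
 * format used below.
 */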
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tss = NULL;
        int i;

        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
                }
                return -ENODEV;
        }

        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          tss->package.count));

        pr->throttling.state_count = tss->package.count;
        pr->throttling.states_tss =
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
                    GFP_KERNEL);
        if (!pr->throttling.states_tss) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->throttling.state_count; i++) {

                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);

                state.length = sizeof(struct acpi_processor_tx_tss);
                state.pointer = tx;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(tss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }

                if (!tx->freqpercentage) {
                        printk(KERN_ERR PREFIX
                               "Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }

      end:
        kfree(buffer.pointer);

        return result;
}

/*
 * _TSD - T-State Dependencies
 */
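/*
 * _TSD returns a single package describing T-state coordination, matching
 * struct acpi_tsd_package: number of entries, revision, domain number,
 * coordination type and number of processors in the domain.  Only
 * revision 0 with the expected entry count is accepted here.
 */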
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *tsd = NULL;
        struct acpi_tsd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
                }
                return -ENODEV;
        }

        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (tsd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->throttling.domain_info);

        state.length = sizeof(struct acpi_tsd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

      end:
        kfree(buffer.pointer);
        return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;

        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= pr->throttling.duty_offset;

        local_irq_disable();

        value = inl(pr->throttling.address);

        /*
         * Compute the current throttling state when throttling is enabled
         * (bit 4 is on).
         */
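        /*
         * Example with hypothetical FADT values: for duty_width = 3 the
         * duty field holds 1..7 eighths of the clock, so a duty_value of 6
         * maps to throttling state T2 (state_count 8 minus 6), i.e. roughly
         * a 75% duty cycle.
         */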
        if (value & 0x10) {
                duty_value = value & duty_mask;
                duty_value >>= pr->throttling.duty_offset;

                if (duty_value)
                        state = pr->throttling.state_count - duty_value;
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state is T%d (%d%% throttling applied)\n",
                          state, pr->throttling.states[state].performance));

        return 0;
}

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                        acpi_integer * value)
{
        struct cpuinfo_x86 *c;
        u64 msr_high, msr_low;
        unsigned int cpu;
        u64 msr = 0;
        int ret = -1;

        cpu = pr->id;
        c = &cpu_data(cpu);

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
                !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                        "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr_low = 0;
                msr_high = 0;
                rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
                        (u32 *)&msr_low, (u32 *)&msr_high);
                msr = (msr_high << 32) | msr_low;
                *value = (acpi_integer) msr;
                ret = 0;
        }
        return ret;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        struct cpuinfo_x86 *c;
        unsigned int cpu;
        int ret = -1;
        u64 msr;

        cpu = pr->id;
        c = &cpu_data(cpu);

        if ((c->x86_vendor != X86_VENDOR_INTEL) ||
                !cpu_has(c, X86_FEATURE_ACPI)) {
                printk(KERN_ERR PREFIX
                        "HARDWARE addr space, NOT supported yet\n");
        } else {
                msr = value;
                wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
                        msr & 0xffffffff, msr >> 32);
                ret = 0;
        }
        return ret;
}
#else
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
                                acpi_integer * value)
{
        printk(KERN_ERR PREFIX
                "HARDWARE addr space, NOT supported yet\n");
        return -1;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
        printk(KERN_ERR PREFIX
                "HARDWARE addr space, NOT supported yet\n");
        return -1;
}
#endif
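/*
 * On the FIXED_HARDWARE path the _TSS control/status values are passed
 * verbatim to rdmsr_on_cpu()/wrmsr_on_cpu() on MSR_IA32_THERM_CONTROL,
 * and only Intel CPUs advertising the ACPI feature flag are handled
 * above; everything else falls back to the error message.
 */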

static int acpi_read_throttling_status(struct acpi_processor *pr,
                                        acpi_integer *value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->status_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                ptc_value = 0;
                bit_width = throttling->status_register.bit_width;
                bit_offset = throttling->status_register.bit_offset;

                acpi_os_read_port((acpi_io_address) throttling->status_register.
                                  address, (u32 *) &ptc_value,
                                  (u32) (bit_width + bit_offset));
                ptc_mask = (1 << bit_width) - 1;
                *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_rdmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
                                acpi_integer value)
{
        u32 bit_width, bit_offset;
        u64 ptc_value;
        u64 ptc_mask;
        struct acpi_processor_throttling *throttling;
        int ret = -1;

        throttling = &pr->throttling;
        switch (throttling->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                bit_width = throttling->control_register.bit_width;
                bit_offset = throttling->control_register.bit_offset;
                ptc_mask = (1 << bit_width) - 1;
                ptc_value = value & ptc_mask;

                acpi_os_write_port((acpi_io_address) throttling->
                                        control_register.address,
                                        (u32) (ptc_value << bit_offset),
                                        (u32) (bit_width + bit_offset));
                ret = 0;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ret = acpi_throttling_wrmsr(pr, value);
                break;
        default:
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr,
                                acpi_integer value)
{
        int i;

        for (i = 0; i < pr->throttling.state_count; i++) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[i]);
                if (tx->control == value)
                        break;
        }
        /* No match found: i would otherwise index past states_tss[]. */
        if (i >= pr->throttling.state_count)
                i = -1;
        return i;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
                        int state, acpi_integer *value)
{
        int ret = -1;

        if (state >= 0 && state < pr->throttling.state_count) {
                struct acpi_processor_tx_tss *tx =
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
                                                      states_tss[state]);
                *value = tx->control;
                ret = 0;
        }
        return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
        int state = 0;
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        pr->throttling.state = 0;
        local_irq_disable();
        value = 0;
        ret = acpi_read_throttling_status(pr, &value);
        if (ret >= 0) {
                state = acpi_get_throttling_state(pr, value);
                pr->throttling.state = state;
        }
        local_irq_enable();

        return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
        return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
        int i, step;

        if (!pr->throttling.address) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
                return -EINVAL;
        } else if (!pr->throttling.duty_width) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
                return -EINVAL;
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
                return -EINVAL;
        }

        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

        /*
         * Compute state values. Note that throttling displays a linear power
         * performance relationship (at 50% performance the CPU will consume
         * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
         */
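        /*
         * For example (hypothetical FADT values): duty_width = 3 gives
         * state_count = 8 and step = 125, so the table becomes T0 = 100.0%,
         * T1 = 87.5%, ..., T7 = 12.5%, stored as tenths of a percent.
         */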

        step = (1000 / pr->throttling.state_count);

        for (i = 0; i < pr->throttling.state_count; i++) {
                pr->throttling.states[i].performance = 1000 - step * i;
                pr->throttling.states[i].power = 1000 - step * i;
        }
        return 0;
}

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
                                              int state)
{
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;
        /*
         * Calculate the duty_value and duty_mask.
         */
        if (state) {
                duty_value = pr->throttling.state_count - state;

                duty_value <<= pr->throttling.duty_offset;

                /* Used to clear all duty_value bits */
                duty_mask = pr->throttling.state_count - 1;

                duty_mask <<= acpi_gbl_FADT.duty_offset;
                duty_mask = ~duty_mask;
        }

        local_irq_disable();

        /*
         * Disable throttling by writing a 0 to bit 4.  Note that throttling
         * must be turned off before the duty_value can be changed.
         */
        value = inl(pr->throttling.address);
        if (value & 0x10) {
                value &= 0xFFFFFFEF;
                outl(value, pr->throttling.address);
        }

        /*
         * Write the new duty_value and then enable throttling.  Note
         * that a state value of 0 leaves throttling disabled.
         */
        if (state) {
                value &= duty_mask;
                value |= duty_value;
                outl(value, pr->throttling.address);

                value |= 0x00000010;
                outl(value, pr->throttling.address);
        }

        pr->throttling.state = state;

        local_irq_enable();

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Throttling state set to T%d (%d%%)\n", state,
                          (pr->throttling.states[state].performance ? pr->
                           throttling.states[state].performance / 10 : 0)));

        return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
                                             int state)
{
        int ret;
        acpi_integer value;

        if (!pr)
                return -EINVAL;

        if ((state < 0) || (state > (pr->throttling.state_count - 1)))
                return -EINVAL;

        if (!pr->flags.throttling)
                return -ENODEV;

        if (state == pr->throttling.state)
                return 0;

        if (state < pr->throttling_platform_limit)
                return -EPERM;

        local_irq_disable();
        value = 0;
        ret = acpi_get_throttling_value(pr, state, &value);
        if (ret >= 0) {
                acpi_write_throttling_state(pr, value);
                pr->throttling.state = state;
        }
        local_irq_enable();

        return 0;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
        return pr->throttling.acpi_processor_set_throttling(pr, state);
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
        int result = 0;

        if (!pr)
                return -EINVAL;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
                          pr->throttling.address,
                          pr->throttling.duty_offset,
                          pr->throttling.duty_width));

        /*
         * Evaluate _PTC, _TSS and _TPC.
         * They must all be present, or none of them can be used.
         */
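        /*
         * If any of the three objects is missing, fall back to the legacy
         * FADT duty-cycle interface (P_BLK based); if that is unusable too,
         * return before any handlers are set, leaving throttling disabled.
         */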
        if (acpi_processor_get_throttling_control(pr) ||
                acpi_processor_get_throttling_states(pr) ||
                acpi_processor_get_platform_limit(pr))
        {
                if (acpi_processor_get_fadt_info(pr))
                        return 0;
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_fadt;
        } else {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_ptc;
                pr->throttling.acpi_processor_set_throttling =
                    &acpi_processor_set_throttling_ptc;
        }

        acpi_processor_get_tsd(pr);

        /*
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
         * This shouldn't be an issue as few (if any) mobile systems ever
         * used this part.
         */
        if (errata.piix4.throttle) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
                          pr->throttling.state_count));

        pr->flags.throttling = 1;

        /*
         * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
         * thermal) decide to lower performance if it so chooses, but for now
         * we'll crank up the speed.
         */

        result = acpi_processor_get_throttling(pr);
        if (result)
                goto end;

        if (pr->throttling.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Disabling throttling (was T%d)\n",
                                  pr->throttling.state));
                result = acpi_processor_set_throttling(pr, 0);
                if (result)
                        goto end;
        }

      end:
        if (result)
                pr->flags.throttling = 0;

        return result;
}

/* proc interface */

static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
{
        struct acpi_processor *pr = seq->private;
        int i = 0;
        int result = 0;

        if (!pr)
                goto end;

        if (!(pr->throttling.state_count > 0)) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        result = acpi_processor_get_throttling(pr);

        if (result) {
                seq_puts(seq,
                         "Could not determine current throttling state.\n");
                goto end;
        }

        seq_printf(seq, "state count:             %d\n"
                   "active state:            T%d\n"
                   "state available: T%d to T%d\n",
                   pr->throttling.state_count, pr->throttling.state,
                   pr->throttling_platform_limit,
                   pr->throttling.state_count - 1);

        seq_puts(seq, "states:\n");
        if (pr->throttling.acpi_processor_get_throttling ==
                        acpi_processor_get_throttling_fadt) {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, "   %cT%d:                  %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (pr->throttling.states[i].performance ? pr->
                                    throttling.states[i].performance / 10 : 0));
        } else {
                for (i = 0; i < pr->throttling.state_count; i++)
                        seq_printf(seq, "   %cT%d:                  %02d%%\n",
                                   (i == pr->throttling.state ? '*' : ' '), i,
                                   (int)pr->throttling.states_tss[i].
                                   freqpercentage);
        }

      end:
        return 0;
}

static int acpi_processor_throttling_open_fs(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, acpi_processor_throttling_seq_show,
                           PDE(inode)->data);
}

static ssize_t acpi_processor_write_throttling(struct file *file,
                                               const char __user * buffer,
                                               size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char state_string[12] = { '\0' };

        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';

        result = acpi_processor_set_throttling(pr,
                                               simple_strtoul(state_string,
                                                              NULL, 0));
        if (result)
                return result;

        return count;
}

struct file_operations acpi_processor_throttling_fops = {
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
        .llseek = seq_lseek,
        .release = single_release,
};
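
/*
 * The proc interface above is typically registered by the processor core
 * as /proc/acpi/processor/<id>/throttling (the directory name, e.g.
 * "CPU0", comes from the ACPI namespace); writing a state number there
 * ends up in acpi_processor_write_throttling(), e.g.:
 *
 *     echo 2 > /proc/acpi/processor/CPU0/throttling
 */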