linux-2.6: drivers/acpi/processor_core.c
/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *      1. Make # power states dynamic.
 *      2. Support duty_cycle values that span bit 4.
 *      3. Optimize by having scheduler determine busyness instead of
 *         having us try to calculate it here.
 *      4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
#define ACPI_PROCESSOR_FILE_INFO        "info"
#define ACPI_PROCESSOR_FILE_THROTTLING  "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT       "limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER     0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING        0x82

#define ACPI_PROCESSOR_LIMIT_USER       0
#define ACPI_PROCESSOR_LIMIT_THERMAL    1

#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);


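/*
 * Match processors declared either with the ACPI "Processor" statement or
 * as a "Device" object carrying the processor device HID.
 */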
static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, 0},
        {ACPI_PROCESSOR_HID, 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
        .name = "processor",
        .class = ACPI_PROCESSOR_CLASS,
        .ids = processor_device_ids,
        .ops = {
                .add = acpi_processor_add,
                .remove = acpi_processor_remove,
                .start = acpi_processor_start,
                .suspend = acpi_processor_suspend,
                .resume = acpi_processor_resume,
                },
};

#define INSTALL_NOTIFY_HANDLER          1
#define UNINSTALL_NOTIFY_HANDLER        2

static const struct file_operations acpi_processor_info_fops = {
        .owner = THIS_MODULE,
        .open = acpi_processor_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
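
/*
 * DMI callback used by the quirk table below: disable the use of MWAIT
 * for C-state entry on boards where it is known to cause problems.
 */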
static int set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - "
                "disabling mwait for CPU C-states\n", id->ident);
        idle_nomwait = 1;
        return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
        {
        set_no_mwait, "IFL91 board", {
        DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
        DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
        DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
        DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
        {},
};

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
        u8 value1 = 0;
        u8 value2 = 0;


        if (!dev)
                return -EINVAL;

        /*
         * Note that 'dev' references the PIIX4 ACPI Controller.
         */

        switch (dev->revision) {
        case 0:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
                break;
        case 1:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
                break;
        case 2:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
                break;
        case 3:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
                break;
        }

        switch (dev->revision) {

        case 0:         /* PIIX4 A-step */
        case 1:         /* PIIX4 B-step */
                /*
                 * See specification changes #13 ("Manual Throttle Duty Cycle")
                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
                 * erratum #5 ("STPCLK# Deassertion Time") from the January
                 * 2002 PIIX4 specification update.  Applies to only older
                 * PIIX4 models.
                 */
                errata.piix4.throttle = 1;

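                /* Fall through: the BM-IDE and Type-F DMA errata below
                 * apply to the A/B-step parts as well. */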
        case 2:         /* PIIX4E */
        case 3:         /* PIIX4M */
                /*
                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
                 * Livelock") from the January 2002 PIIX4 specification update.
                 * Applies to all PIIX4 models.
                 */

                /*
                 * BM-IDE
                 * ------
                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
                 * Status register address.  We'll use this later to read
                 * each IDE controller's DMA status to make sure we catch all
                 * DMA activity.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        errata.piix4.bmisx = pci_resource_start(dev, 4);
                        pci_dev_put(dev);
                }

                /*
                 * Type-F DMA
                 * ----------
                 * Find the PIIX4 ISA Controller and read the Motherboard
                 * DMA controller's status to see if Type-F (Fast) DMA mode
                 * is enabled (bit 7) on either channel.  Note that we'll
                 * disable C3 support if this is enabled, as some legacy
                 * devices won't operate well if fast DMA is disabled.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB_0,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        pci_read_config_byte(dev, 0x76, &value1);
                        pci_read_config_byte(dev, 0x77, &value2);
                        if ((value1 & 0x80) || (value2 & 0x80))
                                errata.piix4.fdma = 1;
                        pci_dev_put(dev);
                }

                break;
        }

        if (errata.piix4.bmisx)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus master activity detection (BM-IDE) erratum enabled\n"));
        if (errata.piix4.fdma)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Type-F DMA livelock erratum (C3 disabled)\n"));

        return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
        int result = 0;
        struct pci_dev *dev = NULL;


        if (!pr)
                return -EINVAL;

        /*
         * PIIX4
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                             PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
                             PCI_ANY_ID, NULL);
        if (dev) {
                result = acpi_processor_errata_piix4(dev);
                pci_dev_put(dev);
        }

        return result;
}

/* --------------------------------------------------------------------------
                              Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
        struct acpi_object_list *pdc_in = pr->pdc;
        acpi_status status = AE_OK;


        if (!pdc_in)
                return status;
        if (idle_nomwait) {
                /*
                 * If MWAIT is disabled for CPU C-states, clear the C2C3_FFH
                 * and C1_FFH access-mode bits in the _PDC argument buffer so
                 * that FFH (MWAIT-based) C-state entry is not advertised.
                 */
                union acpi_object *obj;
                u32 *buffer = NULL;

                obj = pdc_in->pointer;
                buffer = (u32 *)(obj->buffer.pointer);
                buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);

        }
        status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control...\n"));

        return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = seq->private;


        if (!pr)
                goto end;

        seq_printf(seq, "processor id:            %d\n"
                   "acpi id:                 %d\n"
                   "bus mastering control:   %s\n"
                   "power management:        %s\n"
                   "throttling control:      %s\n"
                   "limit interface:         %s\n",
                   pr->id,
                   pr->acpi_id,
                   pr->flags.bm_control ? "yes" : "no",
                   pr->flags.power ? "yes" : "no",
                   pr->flags.throttling ? "yes" : "no",
                   pr->flags.limit ? "yes" : "no");

      end:
        return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_info_seq_show,
                           PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
        struct proc_dir_entry *entry = NULL;


        if (!acpi_device_dir(device)) {
                acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
                                                     acpi_processor_dir);
                if (!acpi_device_dir(device))
                        return -ENODEV;
        }
        acpi_device_dir(device)->owner = THIS_MODULE;

        /* 'info' [R] */
        entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
                                 S_IRUGO, acpi_device_dir(device),
                                 &acpi_processor_info_fops,
                                 acpi_driver_data(device));
        if (!entry)
                return -EIO;

        /* 'throttling' [R/W] */
        entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
                                 S_IFREG | S_IRUGO | S_IWUSR,
                                 acpi_device_dir(device),
                                 &acpi_processor_throttling_fops,
                                 acpi_driver_data(device));
        if (!entry)
                return -EIO;

        /* 'limit' [R/W] */
        entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
                                 S_IFREG | S_IRUGO | S_IWUSR,
                                 acpi_device_dir(device),
                                 &acpi_processor_limit_fops,
                                 acpi_driver_data(device));
        if (!entry)
                return -EIO;
        return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

        if (acpi_device_dir(device)) {
                remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
                                  acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
                                  acpi_device_dir(device));
                remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
                                  acpi_device_dir(device));
                remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
                acpi_device_dir(device) = NULL;
        }

        return 0;
}

/* Use the ACPI id in the MADT to map CPUs in the SMP case */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;
        if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
            lapic->processor_id == acpi_id) {
                *apic_id = lapic->id;
                return 1;
        }
        return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;
        u32 tmp = (lsapic->id << 8) | lsapic->eid;

        /* Only check enabled APICs */
        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        /* Device statement declaration type */
        if (device_declaration) {
                if (entry->length < 16)
                        printk(KERN_ERR PREFIX
                            "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
                            tmp);
                else if (lsapic->uid == acpi_id)
                        goto found;
        /* Processor statement declaration type */
        } else if (lsapic->processor_id == acpi_id)
                goto found;

        return 0;
found:
        *apic_id = tmp;
        return 1;
}

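/*
 * Walk the MADT subtables and return the (S)APIC id whose entry matches the
 * given ACPI processor id ('type' selects Device vs. Processor declaration
 * matching), or -1 if no enabled entry matches.
 */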
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        int apic_id = -1;

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */

        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

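/*
 * _MAT returns a single MADT-style subtable for this processor object; use
 * it to resolve the APIC id directly, without scanning the whole MADT.
 */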
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        if (buffer.pointer)
                kfree(buffer.pointer);
        return apic_id;
}

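/*
 * Map an ACPI processor id to a logical CPU number: resolve the APIC id via
 * _MAT first, fall back to the MADT, then compare the result against
 * cpu_physical_id() of each possible CPU.
 */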
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
{
        int i;
        int apic_id = -1;

        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
        if (apic_id == -1)
                return apic_id;

        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == apic_id)
                        return i;
        }
        return -1;
}
#endif

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

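/*
 * Gather per-processor data: chipset errata, bus-master arbitration
 * capability, the ACPI id (from _UID for Device declarations, or the
 * ProcessorID field otherwise), the matching logical CPU, the P_BLK
 * address for throttling, and the slot number from _SUN if present.
 */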
static int acpi_processor_get_info(struct acpi_device *device)
{
        acpi_status status = 0;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr;
        int cpu_index, device_declaration = 0;
        static int cpu0_initialized;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        if (num_online_cpus() > 1)
                errata.smp = TRUE;

        acpi_processor_errata(pr);

        /*
         * Check to see if we have bus mastering arbitration control.  This
         * is required for proper C3 usage (to maintain cache coherency).
         */
        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
                pr->flags.bm_control = 1;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus mastering arbitration control present\n"));
        } else
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "No bus mastering arbitration control\n"));

        if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
                /*
                 * Declared with "Device" statement; match _UID.
                 * Note that we don't handle string _UIDs yet.
                 */
                unsigned long long value;
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                                NULL, &value);
                if (ACPI_FAILURE(status)) {
                        printk(KERN_ERR PREFIX
                            "Evaluating processor _UID [%#x]\n", status);
                        return -ENODEV;
                }
                device_declaration = 1;
                pr->acpi_id = value;
        } else {
                /* Declared with "Processor" statement; match ProcessorID */
                status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status)) {
                        printk(KERN_ERR PREFIX "Evaluating processor object\n");
                        return -ENODEV;
                }

                /*
                 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
                 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
                 *      arch/xxx/acpi.c
                 */
                pr->acpi_id = object.processor.proc_id;
        }
        cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);

        /* Handle UP system running SMP kernel, with no LAPIC in MADT */
        if (!cpu0_initialized && (cpu_index == -1) &&
            (num_online_cpus() == 1)) {
                cpu_index = 0;
        }

        cpu0_initialized = 1;

        pr->id = cpu_index;

        /*
         *  Extra Processor objects may be enumerated on MP systems with
         *  less than the max # of CPUs. They should be ignored _iff
         *  they are physically not present.
         */
        if (pr->id == -1) {
                if (ACPI_FAILURE
                    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
                        return -ENODEV;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
                          pr->acpi_id));

        if (!object.processor.pblk_address)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
        else if (object.processor.pblk_length != 6)
                printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
                            object.processor.pblk_length);
        else {
                pr->throttling.address = object.processor.pblk_address;
                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

                pr->pblk = object.processor.pblk_address;

                /*
                 * We don't care about error returns - we just try to mark
                 * these reserved so that nobody else is confused into thinking
                 * that this region might be unused..
                 *
                 * (In particular, allocating the IO range for Cardbus)
                 */
                request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        }

        /*
         * If ACPI describes a slot number for this CPU, we can use it to
         * ensure we get the right value in the "physical id" field
         * of /proc/cpuinfo.
         */
        status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
        if (ACPI_SUCCESS(status))
                arch_fix_phys_package_id(pr->id, object.integer.value);

        return 0;
}

static DEFINE_PER_CPU(void *, processor_device_array);

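/*
 * Second-stage initialization, run from the driver's .start callback:
 * fill in the processor data, wire up /proc and sysfs entries, install the
 * notify handler, evaluate _PDC, and register cpuidle and thermal-cooling
 * support for this processor.
 */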
static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_processor *pr;
        struct sys_device *sysdev;

        pr = acpi_driver_data(device);

        result = acpi_processor_get_info(device);
        if (result) {
                /* Processor is physically not present */
                return 0;
        }

        BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

        /*
         * Buggy BIOS check
         * ACPI id of processors can be reported wrongly by the BIOS.
         * Don't trust it blindly
         */
        if (per_cpu(processor_device_array, pr->id) != NULL &&
            per_cpu(processor_device_array, pr->id) != device) {
                printk(KERN_WARNING "BIOS reported wrong ACPI id "
                        "for the processor\n");
                return -ENODEV;
        }
        per_cpu(processor_device_array, pr->id) = device;

        per_cpu(processors, pr->id) = pr;

        result = acpi_processor_add_fs(device);
        if (result)
                goto end;

        sysdev = get_cpu_sysdev(pr->id);
        if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
                return -EFAULT;

        status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                                             acpi_processor_notify, pr);

        /* _PDC call should be done before doing anything else (if reqd.). */
        arch_acpi_processor_init_pdc(pr);
        acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
        acpi_processor_ppc_has_changed(pr);
#endif
        acpi_processor_get_throttling_info(pr);
        acpi_processor_get_limit_info(pr);


        acpi_processor_power_init(pr, device);

        pr->cdev = thermal_cooling_device_register("Processor", device,
                                                &processor_cooling_ops);
        if (IS_ERR(pr->cdev)) {
                result = PTR_ERR(pr->cdev);
                goto end;
        }

        dev_info(&device->dev, "registered as cooling_device%d\n",
                 pr->cdev->id);

        result = sysfs_create_link(&device->dev.kobj,
                                   &pr->cdev->device.kobj,
                                   "thermal_cooling");
        if (result)
                printk(KERN_ERR PREFIX "Create sysfs link\n");
        result = sysfs_create_link(&pr->cdev->device.kobj,
                                   &device->dev.kobj,
                                   "device");
        if (result)
                printk(KERN_ERR PREFIX "Create sysfs link\n");

        if (pr->flags.throttling) {
                printk(KERN_INFO PREFIX "%s [%s] (supports",
                       acpi_device_name(device), acpi_device_bid(device));
                printk(" %d throttling states", pr->throttling.state_count);
                printk(")\n");
        }

      end:

        return result;
}

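/*
 * ACPI notify handler: on performance (0x80), power (0x81) and throttling
 * (0x82) notifications, re-evaluate the corresponding state data and emit
 * the matching /proc and netlink events.
 */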
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
        struct acpi_processor *pr = data;
        struct acpi_device *device = NULL;
        int saved;

        if (!pr)
                return;

        if (acpi_bus_get_device(pr->handle, &device))
                return;

        switch (event) {
        case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
                saved = pr->performance_platform_limit;
                acpi_processor_ppc_has_changed(pr);
                if (saved == pr->performance_platform_limit)
                        break;
                acpi_bus_generate_proc_event(device, event,
                                        pr->performance_platform_limit);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event,
                                                  pr->performance_platform_limit);
                break;
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_processor_cst_has_changed(pr);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
        case ACPI_PROCESSOR_NOTIFY_THROTTLING:
                acpi_processor_tstate_has_changed(pr);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
                break;
        }

        return;
}

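/*
 * CPU hotplug notifier: when a CPU comes online, re-evaluate its P-, C- and
 * T-state data, since the platform limits may have changed while it was
 * offline.
 */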
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct acpi_processor *pr = per_cpu(processors, cpu);

        if (action == CPU_ONLINE && pr) {
                acpi_processor_ppc_has_changed(pr);
                acpi_processor_cst_has_changed(pr);
                acpi_processor_tstate_has_changed(pr);
        }
        return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier =
{
            .notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
        struct acpi_processor *pr = NULL;


        if (!device)
                return -EINVAL;

        pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
        if (!pr)
                return -ENOMEM;

        if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
                kfree(pr);
                return -ENOMEM;
        }

        pr->handle = device->handle;
        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
        device->driver_data = pr;

        return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
        acpi_status status = AE_OK;
        struct acpi_processor *pr = NULL;


        if (!device || !acpi_driver_data(device))
                return -EINVAL;

        pr = acpi_driver_data(device);

        if (pr->id >= nr_cpu_ids)
                goto free;

        if (type == ACPI_BUS_REMOVAL_EJECT) {
                if (acpi_processor_handle_eject(pr))
                        return -EINVAL;
        }

        acpi_processor_power_exit(pr, device);

        status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
                                            acpi_processor_notify);

        sysfs_remove_link(&device->dev.kobj, "sysdev");

        acpi_processor_remove_fs(device);

        if (pr->cdev) {
                sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
                sysfs_remove_link(&pr->cdev->device.kobj, "device");
                thermal_cooling_device_unregister(pr->cdev);
                pr->cdev = NULL;
        }

        per_cpu(processors, pr->id) = NULL;
        per_cpu(processor_device_array, pr->id) = NULL;

free:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        kfree(pr);

        return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *      ACPI processor hotplug support                                      *
 ****************************************************************************/

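/* Helper: evaluate _STA and report whether the processor is physically present. */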
static int is_processor_present(acpi_handle handle)
{
        acpi_status status;
        unsigned long long sta = 0;


        status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

        if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
                return 1;

        /*
         * _STA is mandatory for a processor that supports hot plug
         */
        if (status == AE_NOT_FOUND)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "Processor does not support hot plug\n"));
        else
                ACPI_EXCEPTION((AE_INFO, status,
                                "Processor Device is not present"));
        return 0;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
        acpi_handle phandle;
        struct acpi_device *pdev;
        struct acpi_processor *pr;


        if (acpi_get_parent(handle, &phandle)) {
                return -ENODEV;
        }

        if (acpi_bus_get_device(phandle, &pdev)) {
                return -ENODEV;
        }

        if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
                return -ENODEV;
        }

        acpi_bus_start(*device);

        pr = acpi_driver_data(*device);
        if (!pr)
                return -ENODEV;

        if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
                kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
        }
        return 0;
}

static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
                                                u32 event, void *data)
{
        struct acpi_processor *pr;
        struct acpi_device *device = NULL;
        int result;


        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "Processor driver received %s event\n",
                       (event == ACPI_NOTIFY_BUS_CHECK) ?
                       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

                if (!is_processor_present(handle))
                        break;

                if (acpi_bus_get_device(handle, &device)) {
                        result = acpi_processor_device_add(handle, &device);
                        if (result)
                                printk(KERN_ERR PREFIX
                                            "Unable to add the device\n");
                        break;
                }

                pr = acpi_driver_data(device);
                if (!pr) {
                        printk(KERN_ERR PREFIX "Driver data is NULL\n");
                        break;
                }

                if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
                        kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
                        break;
                }

                result = acpi_processor_start(device);
                if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
                        kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
                } else {
                        printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
                                    acpi_device_bid(device));
                }
                break;
        case ACPI_NOTIFY_EJECT_REQUEST:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

                if (acpi_bus_get_device(handle, &device)) {
                        printk(KERN_ERR PREFIX
                                    "Device doesn't exist, dropping EJECT\n");
                        break;
                }
                pr = acpi_driver_data(device);
                if (!pr) {
                        printk(KERN_ERR PREFIX
                                    "Driver data is NULL, dropping EJECT\n");
                        return;
                }

                if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
                        kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
                break;
        }

        return;
}

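/*
 * Namespace-walk callback: install or remove the hotplug notify handler on
 * every Processor object, depending on the action passed in 'context'.
 */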
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
                            u32 lvl, void *context, void **rv)
{
        acpi_status status;
        int *action = context;
        acpi_object_type type = 0;

        status = acpi_get_type(handle, &type);
        if (ACPI_FAILURE(status))
                return (AE_OK);

        if (type != ACPI_TYPE_PROCESSOR)
                return (AE_OK);

        switch (*action) {
        case INSTALL_NOTIFY_HANDLER:
                acpi_install_notify_handler(handle,
                                            ACPI_SYSTEM_NOTIFY,
                                            acpi_processor_hotplug_notify,
                                            NULL);
                break;
        case UNINSTALL_NOTIFY_HANDLER:
                acpi_remove_notify_handler(handle,
                                           ACPI_SYSTEM_NOTIFY,
                                           acpi_processor_hotplug_notify);
                break;
        default:
                break;
        }

        return (AE_OK);
}

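/*
 * Hot-add path: map the new processor's (L)SAPIC to a logical CPU id and
 * register that CPU with the architecture code; undo the mapping if
 * registration fails.
 */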
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

        if (!is_processor_present(handle)) {
                return AE_ERROR;
        }

        if (acpi_map_lsapic(handle, p_cpu))
                return AE_ERROR;

        if (arch_register_cpu(*p_cpu)) {
                acpi_unmap_lsapic(*p_cpu);
                return AE_ERROR;
        }

        return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
        if (cpu_online(pr->id))
                cpu_down(pr->id);

        arch_unregister_cpu(pr->id);
        acpi_unmap_lsapic(pr->id);
        return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
        return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
        return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = INSTALL_NOTIFY_HANDLER;
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, &action, NULL);
#endif
        register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        int action = UNINSTALL_NOTIFY_HANDLER;
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
                            ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            processor_walk_namespace_cb, &action, NULL);
#endif
        unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
 */

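/*
 * Module init: cache the MADT (SMP only), create /proc/acpi/processor,
 * apply the DMI MWAIT quirks, then register the cpuidle driver, the ACPI
 * driver, the hotplug notifiers and the thermal/PPC/throttling hooks.
 */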
static int __init acpi_processor_init(void)
{
        int result = 0;

        memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
        if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                (struct acpi_table_header **)&madt)))
                madt = NULL;
#endif

        acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        if (!acpi_processor_dir)
                return -ENOMEM;
        acpi_processor_dir->owner = THIS_MODULE;

        /*
         * Check whether the system matches an entry in the DMI quirk table.
         * If it does, OSPM should not use MWAIT for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);
        result = cpuidle_register_driver(&acpi_idle_driver);
        if (result < 0)
                goto out_proc;

        result = acpi_bus_register_driver(&acpi_processor_driver);
        if (result < 0)
                goto out_cpuidle;

        acpi_processor_install_hotplug_notify();

        acpi_thermal_cpufreq_init();

        acpi_processor_ppc_init();

        acpi_processor_throttling_init();

        return 0;

out_cpuidle:
        cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

        return result;
}

static void __exit acpi_processor_exit(void)
{
        acpi_processor_ppc_exit();

        acpi_thermal_cpufreq_exit();

        acpi_processor_uninstall_hotplug_notify();

        acpi_bus_unregister_driver(&acpi_processor_driver);

        cpuidle_unregister_driver(&acpi_idle_driver);

        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

        return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");