2 * Copyright (C) 1999 VA Linux Systems
3 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
4 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
5 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
31 #include <acpi/pdc_intel.h>
33 #include <linux/init.h>
34 #include <linux/numa.h>
35 #include <asm/system.h>
/* ACPICA's compiler-dependent 64-bit types: on IA64 a plain
 * (unsigned) long is 64 bits wide. */
38 #define COMPILER_DEPENDENT_INT64 long
39 #define COMPILER_DEPENDENT_UINT64 unsigned long
42 * Calling conventions:
44 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
45 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
46 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
47 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
/* Every XFACE qualifier expands to nothing — no special
 * calling-convention attributes are needed on this architecture. */
49 #define ACPI_SYSTEM_XFACE
50 #define ACPI_EXTERNAL_XFACE
51 #define ACPI_INTERNAL_XFACE
52 #define ACPI_INTERNAL_VAR_XFACE
/* Signals to ACPICA that this header supplies the asm helper macros
 * defined below. */
56 #define ACPI_ASM_MACROS
/* ACPICA interrupt control maps straight onto the kernel's local-IRQ
 * primitives. */
58 #define ACPI_DISABLE_IRQS() local_irq_disable()
59 #define ACPI_ENABLE_IRQS() local_irq_enable()
/* Deliberately empty: no explicit cache flush is performed here —
 * NOTE(review): confirm rationale against arch documentation. */
60 #define ACPI_FLUSH_CPU_CACHE()
/*
 * Acquire the ACPI Global Lock (resident in the FACS) using the ACPI
 * spec's lock protocol (bit 0 = pending, bit 1 = owned): set the owned
 * bit and, if the lock was already owned, set the pending bit, via an
 * acquire-semantics cmpxchg retry loop.
 * Returns -1 (true) when the lock was obtained, 0 when it is owned by
 * the firmware side and the pending bit was set (caller must wait).
 * NOTE(review): the return-type line, opening brace, `do {` and the
 * `old = *lock;` read appear to be elided from this excerpt.
 */
63 ia64_acpi_acquire_global_lock (unsigned int *lock)
65 unsigned int old, new, val;
/* New value: keep upper bits, set owned (+2), and copy the old owned
 * bit into pending (bit 0). */
68 new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
/* Publish atomically; retry if another agent changed *lock meanwhile. */
69 val = ia64_cmpxchg4_acq(lock, new, old);
70 } while (unlikely (val != old));
/* new < 3 means the pending bit stayed clear, i.e. we own the lock. */
71 return (new < 3) ? -1 : 0;
/*
 * Release the ACPI Global Lock: clear the owned and pending bits with
 * the same acquire-semantics cmpxchg retry loop as the acquire path.
 * NOTE(review): the return-type line, opening brace, `do {`, the
 * computation of `new` (typically `new = old & ~0x3;`) and the final
 * return (typically `return old & 0x1;`, telling the caller whether
 * waiters are pending) are elided from this excerpt — confirm against
 * the full file.
 */
75 ia64_acpi_release_global_lock (unsigned int *lock)
77 unsigned int old, new, val;
81 val = ia64_cmpxchg4_acq(lock, new, old);
82 } while (unlikely (val != old));
/* Glue ACPICA's global-lock hooks to the IA64 helpers above; Acq
 * receives the helper's return value. */
86 #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
87 ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
89 #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
90 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
/* ACPI support is unconditional on IA64, so the usual runtime switches
 * are compile-time constants and fold away. */
92 #define acpi_disabled 0 /* ACPI always enabled on IA64 */
93 #define acpi_noirq 0 /* ACPI always enabled on IA64 */
94 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
95 #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
96 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
/* ACPI can never be disabled on IA64, so this is intentionally a no-op. */
static inline void disable_acpi(void)
{
}
/* Platform/system name discovered via ACPI — presumably used for
 * machine-vector selection; verify against the implementation. */
99 const char *acpi_get_sysname (void);
100 int acpi_request_vector (u32 int_type);
/* Translate an ACPI GSI to a kernel IRQ number — presumably stored
 * through *irq on success; confirm against the implementation. */
101 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
103 /* routines for saving/restoring kernel state */
104 extern int acpi_save_state_mem(void);
105 extern void acpi_restore_state_mem(void);
106 extern unsigned long acpi_wakeup_address;
109 * Record the cpei override flag and current logical cpu. This is
110 * useful for CPU removal.
/* CPEI = Corrected Platform Error Interrupt; these track/retarget its
 * target CPU so that CPU can (or cannot) be hot-removed. */
112 extern unsigned int can_cpei_retarget(void);
113 extern unsigned int is_cpu_cpei_target(unsigned int cpu);
114 extern void set_cpei_target_cpu(unsigned int cpu);
115 extern unsigned int get_cpei_target_cpu(void);
116 extern void prefill_possible_map(void);
/* With CPU hotplug, additional_cpus is a real variable; otherwise it
 * degenerates to the constant 0.  NOTE(review): the intervening #else
 * and closing #endif appear elided from this excerpt. */
117 #ifdef CONFIG_ACPI_HOTPLUG_CPU
118 extern int additional_cpus;
120 #define additional_cpus 0
/* Translation tables between ACPI proximity domains (PXM) and NUMA
 * node ids.  NOTE(review): the #else/#endif lines of both conditionals
 * appear elided from this excerpt. */
123 #ifdef CONFIG_ACPI_NUMA
124 #if MAX_NUMNODES > 256
125 #define MAX_PXM_DOMAINS MAX_NUMNODES
127 #define MAX_PXM_DOMAINS (256)
129 extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
130 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
/* Expands to nothing on IA64 — no TLB work is done here. */
133 #define acpi_unlazy_tlb(x)
135 #ifdef CONFIG_ACPI_NUMA
/* CPUs discovered during early ACPI table parsing, before the final
 * possible map is built — TODO confirm exact population site. */
136 extern cpumask_t early_cpu_possible_map;
137 #define for_each_possible_early_cpu(cpu) \
138 for_each_cpu_mask((cpu), early_cpu_possible_map)
/*
 * Pad early_cpu_possible_map up to at least min_cpus, plus reserve_cpus
 * extra hot-plug slots (capped at NR_CPUS), assigning the padded CPUs
 * round-robin across the online nodes.
 * NOTE(review): several lines are elided from this excerpt — the
 * opening brace, the `cpu`/`next_nid` declarations, the `next_nid++`
 * and wrap-to-zero statements, and the closing braces.
 */
140 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
142 int low_cpu, high_cpu;
/* Number of CPUs actually discovered so far. */
146 low_cpu = cpus_weight(early_cpu_possible_map);
/* Final count: at least min_cpus, plus reserve slots, never > NR_CPUS. */
148 high_cpu = max(low_cpu, min_cpus);
149 high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
/* Mark the padding CPUs possible and spread them over known nodes. */
151 for (cpu = low_cpu; cpu < high_cpu; cpu++) {
152 cpu_set(cpu, early_cpu_possible_map);
153 if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
154 node_cpuid[cpu].nid = next_nid;
156 if (next_nid >= num_online_nodes())
161 #endif /* CONFIG_ACPI_NUMA */
163 #endif /*__KERNEL__*/
165 #endif /*_ASM_ACPI_H*/