Merge branch 'master' into upstream
[linux-2.6] / arch / x86_64 / kernel / genapic_flat.c
1 /*
2  * Copyright 2004 James Cleverdon, IBM.
3  * Subject to the GNU Public License, v.2
4  *
5  * Flat APIC subarch code.
6  *
7  * Hacked for x86-64 by James Cleverdon from i386 architecture code by
8  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
9  * James Cleverdon.
10  */
11 #include <linux/threads.h>
12 #include <linux/cpumask.h>
13 #include <linux/string.h>
14 #include <linux/kernel.h>
15 #include <linux/ctype.h>
16 #include <linux/init.h>
17 #include <asm/smp.h>
18 #include <asm/ipi.h>
19
20 static cpumask_t flat_target_cpus(void)
21 {
22         return cpu_online_map;
23 }
24
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;	/* one bit per CPU in the 8-bit logical ID */
	x86_cpu_to_log_apicid[num] = id;	/* record for later IPI mask building */
	/* Flat destination format must be programmed before the LDR. */
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	/* Preserve the non-LDR bits of the register, then insert our ID. */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
45
/*
 * Send 'vector' as a logical-destination IPI to every CPU in 'cpumask'.
 *
 * In flat mode the whole destination set fits in the first word of the
 * cpumask, so it can be written into ICR2 as a single bitmask.
 */
static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	/* The ICR2/ICR write pair below must not be interleaved by an IRQ. */
	local_irq_save(flags);

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, APIC_DEST_LOGICAL);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
	local_irq_restore(flags);
}
76
77 static void flat_send_IPI_allbutself(int vector)
78 {
79 #ifdef  CONFIG_HOTPLUG_CPU
80         int hotplug = 1;
81 #else
82         int hotplug = 0;
83 #endif
84         if (hotplug || vector == NMI_VECTOR) {
85                 cpumask_t allbutme = cpu_online_map;
86
87                 cpu_clear(smp_processor_id(), allbutme);
88
89                 if (!cpus_empty(allbutme))
90                         flat_send_IPI_mask(allbutme, vector);
91         } else if (num_online_cpus() > 1) {
92                 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
93         }
94 }
95
96 static void flat_send_IPI_all(int vector)
97 {
98         if (vector == NMI_VECTOR)
99                 flat_send_IPI_mask(cpu_online_map, vector);
100         else
101                 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
102 }
103
104 static int flat_apic_id_registered(void)
105 {
106         return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
107 }
108
109 static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
110 {
111         return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
112 }
113
/*
 * Derive the physical package ID by discarding the low 'index_msb'
 * bits of this CPU's hardware APIC ID.
 */
static unsigned int phys_pkg_id(int index_msb)
{
	unsigned int apicid = hard_smp_processor_id();

	return apicid >> index_msb;
}
118
/*
 * Flat (logical destination) APIC driver.  Used when all CPUs fit in
 * the 8-bit logical destination bitmask.
 */
struct genapic apic_flat =  {
	.name = "flat",
	.int_delivery_mode = dest_LowestPrio,	/* hardware picks a target CPU */
	.int_dest_mode = (APIC_DEST_LOGICAL != 0),	/* logical addressing */
	.target_cpus = flat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,
	.send_IPI_all = flat_send_IPI_all,
	.send_IPI_allbutself = flat_send_IPI_allbutself,
	.send_IPI_mask = flat_send_IPI_mask,
	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};
132
/*
 * Physflat mode is used when there are more than 8 CPUs on a AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */

static cpumask_t physflat_target_cpus(void)
{
	/*
	 * Direct interrupts at CPU 0 by default.
	 * NOTE(review): presumably IRQ balancing redistributes these
	 * later -- confirm against the io_apic setup code.
	 */
	return cpumask_of_cpu(0);
}
143
/*
 * Send 'vector' to each CPU in 'cpumask' individually: with physical
 * destination mode there is no multi-CPU bitmask, so the helper walks
 * the mask one CPU at a time.
 */
static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	send_IPI_mask_sequence(cpumask, vector);
}
148
149 static void physflat_send_IPI_allbutself(int vector)
150 {
151         cpumask_t allbutme = cpu_online_map;
152
153         cpu_clear(smp_processor_id(), allbutme);
154         physflat_send_IPI_mask(allbutme, vector);
155 }
156
/*
 * Send 'vector' to every online CPU, including the sender, via the
 * per-CPU physical-mode mask path.
 */
static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_map, vector);
}
161
162 static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
163 {
164         int cpu;
165
166         /*
167          * We're using fixed IRQ delivery, can only return one phys APIC ID.
168          * May as well be the first.
169          */
170         cpu = first_cpu(cpumask);
171         if ((unsigned)cpu < NR_CPUS)
172                 return x86_cpu_to_apicid[cpu];
173         else
174                 return BAD_APICID;
175 }
176
/*
 * Physical-flat APIC driver.  Used when the logical bitmask would
 * overflow (more than 8 CPUs on an AMD system); every IPI names one
 * physical APIC ID at a time.
 */
struct genapic apic_physflat =  {
	.name = "physical flat",
	.int_delivery_mode = dest_Fixed,	/* deliver to the named CPU only */
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),	/* physical addressing */
	.target_cpus = physflat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
	.send_IPI_all = physflat_send_IPI_all,
	.send_IPI_allbutself = physflat_send_IPI_allbutself,
	.send_IPI_mask = physflat_send_IPI_mask,
	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};