Pull trivial2 into release branch
[linux-2.6] / arch / x86_64 / kernel / genapic_flat.c
1 /*
2  * Copyright 2004 James Cleverdon, IBM.
3  * Subject to the GNU Public License, v.2
4  *
5  * Flat APIC subarch code.
6  *
7  * Hacked for x86-64 by James Cleverdon from i386 architecture code by
8  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
9  * James Cleverdon.
10  */
11 #include <linux/config.h>
12 #include <linux/threads.h>
13 #include <linux/cpumask.h>
14 #include <linux/string.h>
15 #include <linux/kernel.h>
16 #include <linux/ctype.h>
17 #include <linux/init.h>
18 #include <asm/smp.h>
19 #include <asm/ipi.h>
20
21 static cpumask_t flat_target_cpus(void)
22 {
23         return cpu_online_map;
24 }
25
26 /*
27  * Set up the logical destination ID.
28  *
29  * Intel recommends to set DFR, LDR and TPR before enabling
30  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
31  * document number 292116).  So here it goes...
32  */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	/* Flat logical mode: each CPU owns one bit of the 8-bit
	 * logical destination field. */
	num = smp_processor_id();
	id = 1UL << num;
	/* Publish our logical ID so IPI senders can build destination masks. */
	x86_cpu_to_log_apicid[num] = id;
	/* Select the flat model in DFR before programming LDR (see the
	 * Intel ordering recommendation in the comment above). */
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	/* Preserve the non-destination bits of LDR, install our ID. */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
46
47 static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
48 {
49         unsigned long mask = cpus_addr(cpumask)[0];
50         unsigned long cfg;
51         unsigned long flags;
52
53         local_save_flags(flags);
54         local_irq_disable();
55
56         /*
57          * Wait for idle.
58          */
59         apic_wait_icr_idle();
60
61         /*
62          * prepare target chip field
63          */
64         cfg = __prepare_ICR2(mask);
65         apic_write(APIC_ICR2, cfg);
66
67         /*
68          * program the ICR
69          */
70         cfg = __prepare_ICR(0, vector, APIC_DEST_LOGICAL);
71
72         /*
73          * Send the IPI. The write to APIC_ICR fires this off.
74          */
75         apic_write(APIC_ICR, cfg);
76         local_irq_restore(flags);
77 }
78
79 static void flat_send_IPI_allbutself(int vector)
80 {
81 #ifndef CONFIG_HOTPLUG_CPU
82         if (((num_online_cpus()) - 1) >= 1)
83                 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
84 #else
85         cpumask_t allbutme = cpu_online_map;
86
87         cpu_clear(smp_processor_id(), allbutme);
88
89         if (!cpus_empty(allbutme))
90                 flat_send_IPI_mask(allbutme, vector);
91 #endif
92 }
93
94 static void flat_send_IPI_all(int vector)
95 {
96         __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
97 }
98
99 static int flat_apic_id_registered(void)
100 {
101         return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
102 }
103
104 static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
105 {
106         return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
107 }
108
109 static unsigned int phys_pkg_id(int index_msb)
110 {
111         u32 ebx;
112
113         ebx = cpuid_ebx(1);
114         return ((ebx >> 24) & 0xFF) >> index_msb;
115 }
116
/* genapic operations for flat logical-destination mode (up to 8 CPUs). */
struct genapic apic_flat =  {
	.name = "flat",
	.int_delivery_mode = dest_LowestPrio,
	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
	.target_cpus = flat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,
	.send_IPI_all = flat_send_IPI_all,
	.send_IPI_allbutself = flat_send_IPI_allbutself,
	.send_IPI_mask = flat_send_IPI_mask,
	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};
131
132 /*
133  * Physflat mode is used when there are more than 8 CPUs on a AMD system.
134  * We cannot use logical delivery in this case because the mask
135  * overflows, so use physical mode.
136  */
137
138 static cpumask_t physflat_target_cpus(void)
139 {
140         return cpumask_of_cpu(0);
141 }
142
143 static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
144 {
145         send_IPI_mask_sequence(cpumask, vector);
146 }
147
148 static void physflat_send_IPI_allbutself(int vector)
149 {
150         cpumask_t allbutme = cpu_online_map;
151
152         cpu_clear(smp_processor_id(), allbutme);
153         physflat_send_IPI_mask(allbutme, vector);
154 }
155
156 static void physflat_send_IPI_all(int vector)
157 {
158         physflat_send_IPI_mask(cpu_online_map, vector);
159 }
160
161 static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
162 {
163         int cpu;
164
165         /*
166          * We're using fixed IRQ delivery, can only return one phys APIC ID.
167          * May as well be the first.
168          */
169         cpu = first_cpu(cpumask);
170         if ((unsigned)cpu < NR_CPUS)
171                 return x86_cpu_to_apicid[cpu];
172         else
173                 return BAD_APICID;
174 }
175
/* genapic operations for physical-destination mode (> 8 CPUs; see the
 * comment above apic_flat's physflat section). */
struct genapic apic_physflat =  {
	.name = "physical flat",
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.int_delivery_dest = APIC_DEST_PHYSICAL | APIC_DM_FIXED,
	.target_cpus = physflat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
	.send_IPI_all = physflat_send_IPI_all,
	.send_IPI_allbutself = physflat_send_IPI_allbutself,
	.send_IPI_mask = physflat_send_IPI_mask,
	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};