1 /*
2  * Extensible Firmware Interface
3  *
4  * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
5  *
6  * Copyright (C) 1999 VA Linux Systems
7  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
8  * Copyright (C) 1999-2003 Hewlett-Packard Co.
9  *      David Mosberger-Tang <davidm@hpl.hp.com>
10  *      Stephane Eranian <eranian@hpl.hp.com>
11  * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
12  *      Bjorn Helgaas <bjorn.helgaas@hp.com>
13  *
14  * Not all EFI Runtime Services are implemented yet, as EFI only
15  * supports physical-mode addressing on SoftSDV.  This is to be fixed
16  * in a future version.  --drummond 1999-07-20
17  *
18  * Implemented EFI runtime services and virtual mode calls.  --davidm
19  *
20  * Goutham Rao: <goutham.rao@intel.com>
21  *      Skip non-WB memory and ignore empty memory ranges.
22  */
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/types.h>
27 #include <linux/time.h>
28 #include <linux/efi.h>
29
30 #include <asm/io.h>
31 #include <asm/kregs.h>
32 #include <asm/meminit.h>
33 #include <asm/pgtable.h>
34 #include <asm/processor.h>
35 #include <asm/mca.h>
36
37 #define EFI_DEBUG       0
38
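/*
 * efi_call_phys() is an assembly trampoline (efi_stub.S) that drops to
 * physical mode, invokes the firmware entry point, and switches back.
 */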
39 extern efi_status_t efi_call_phys (void *, ...);
40
41 struct efi efi;
42 EXPORT_SYMBOL(efi);
43 static efi_runtime_services_t *runtime;
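/* Limits set by the "mem=" and "max_addr=" boot options (parsed in efi_init). */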
44 static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
45
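/*
 * Once SetVirtualAddressMap() has been called, runtime services can be
 * invoked directly through their (virtual) function pointers.
 */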
46 #define efi_call_virt(f, args...)       (*(f))(args)
47
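/*
 * The STUB_* macros below generate two flavours of wrapper for each EFI
 * runtime service: a "phys" variant used before the firmware is switched
 * to virtual mode and a "virt" variant used afterwards.  Each wrapper
 * saves and restores the scratch floating-point registers around the
 * firmware call (EFI may clobber them) and applies adjust_arg() to
 * pointer arguments so that the physical-mode variants pass physical
 * addresses.
 */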
48 #define STUB_GET_TIME(prefix, adjust_arg)                                                         \
49 static efi_status_t                                                                               \
50 prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc)                                            \
51 {                                                                                                 \
52         struct ia64_fpreg fr[6];                                                                  \
53         efi_time_cap_t *atc = NULL;                                                               \
54         efi_status_t ret;                                                                         \
55                                                                                                   \
56         if (tc)                                                                                   \
57                 atc = adjust_arg(tc);                                                             \
58         ia64_save_scratch_fpregs(fr);                                                             \
59         ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
60         ia64_load_scratch_fpregs(fr);                                                             \
61         return ret;                                                                               \
62 }
63
64 #define STUB_SET_TIME(prefix, adjust_arg)                                                       \
65 static efi_status_t                                                                             \
66 prefix##_set_time (efi_time_t *tm)                                                              \
67 {                                                                                               \
68         struct ia64_fpreg fr[6];                                                                \
69         efi_status_t ret;                                                                       \
70                                                                                                 \
71         ia64_save_scratch_fpregs(fr);                                                           \
72         ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm));    \
73         ia64_load_scratch_fpregs(fr);                                                           \
74         return ret;                                                                             \
75 }
76
77 #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg)                                                \
78 static efi_status_t                                                                             \
79 prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)             \
80 {                                                                                               \
81         struct ia64_fpreg fr[6];                                                                \
82         efi_status_t ret;                                                                       \
83                                                                                                 \
84         ia64_save_scratch_fpregs(fr);                                                           \
85         ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time),       \
86                                 adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm));      \
87         ia64_load_scratch_fpregs(fr);                                                           \
88         return ret;                                                                             \
89 }
90
91 #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg)                                                \
92 static efi_status_t                                                                             \
93 prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)                                   \
94 {                                                                                               \
95         struct ia64_fpreg fr[6];                                                                \
96         efi_time_t *atm = NULL;                                                                 \
97         efi_status_t ret;                                                                       \
98                                                                                                 \
99         if (tm)                                                                                 \
100                 atm = adjust_arg(tm);                                                           \
101         ia64_save_scratch_fpregs(fr);                                                           \
102         ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time),       \
103                                 enabled, atm);                                                  \
104         ia64_load_scratch_fpregs(fr);                                                           \
105         return ret;                                                                             \
106 }
107
108 #define STUB_GET_VARIABLE(prefix, adjust_arg)                                           \
109 static efi_status_t                                                                     \
110 prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,               \
111                        unsigned long *data_size, void *data)                            \
112 {                                                                                       \
113         struct ia64_fpreg fr[6];                                                        \
114         u32 *aattr = NULL;                                                                      \
115         efi_status_t ret;                                                               \
116                                                                                         \
117         if (attr)                                                                       \
118                 aattr = adjust_arg(attr);                                               \
119         ia64_save_scratch_fpregs(fr);                                                   \
120         ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable),     \
121                                 adjust_arg(name), adjust_arg(vendor), aattr,            \
122                                 adjust_arg(data_size), adjust_arg(data));               \
123         ia64_load_scratch_fpregs(fr);                                                   \
124         return ret;                                                                     \
125 }
126
127 #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg)                                              \
128 static efi_status_t                                                                             \
129 prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)   \
130 {                                                                                               \
131         struct ia64_fpreg fr[6];                                                                \
132         efi_status_t ret;                                                                       \
133                                                                                                 \
134         ia64_save_scratch_fpregs(fr);                                                           \
135         ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable),   \
136                                 adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor));   \
137         ia64_load_scratch_fpregs(fr);                                                           \
138         return ret;                                                                             \
139 }
140
141 #define STUB_SET_VARIABLE(prefix, adjust_arg)                                           \
142 static efi_status_t                                                                     \
143 prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr,      \
144                        unsigned long data_size, void *data)                             \
145 {                                                                                       \
146         struct ia64_fpreg fr[6];                                                        \
147         efi_status_t ret;                                                               \
148                                                                                         \
149         ia64_save_scratch_fpregs(fr);                                                   \
150         ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable),     \
151                                 adjust_arg(name), adjust_arg(vendor), attr, data_size,  \
152                                 adjust_arg(data));                                      \
153         ia64_load_scratch_fpregs(fr);                                                   \
154         return ret;                                                                     \
155 }
156
157 #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg)                                       \
158 static efi_status_t                                                                             \
159 prefix##_get_next_high_mono_count (u32 *count)                                                  \
160 {                                                                                               \
161         struct ia64_fpreg fr[6];                                                                \
162         efi_status_t ret;                                                                       \
163                                                                                                 \
164         ia64_save_scratch_fpregs(fr);                                                           \
165         ret = efi_call_##prefix((efi_get_next_high_mono_count_t *)                              \
166                                 __va(runtime->get_next_high_mono_count), adjust_arg(count));    \
167         ia64_load_scratch_fpregs(fr);                                                           \
168         return ret;                                                                             \
169 }
170
171 #define STUB_RESET_SYSTEM(prefix, adjust_arg)                                   \
172 static void                                                                     \
173 prefix##_reset_system (int reset_type, efi_status_t status,                     \
174                        unsigned long data_size, efi_char16_t *data)             \
175 {                                                                               \
176         struct ia64_fpreg fr[6];                                                \
177         efi_char16_t *adata = NULL;                                             \
178                                                                                 \
179         if (data)                                                               \
180                 adata = adjust_arg(data);                                       \
181                                                                                 \
182         ia64_save_scratch_fpregs(fr);                                           \
183         efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system),   \
184                           reset_type, status, data_size, adata);                \
185         /* should not return, but just in case... */                            \
186         ia64_load_scratch_fpregs(fr);                                           \
187 }
188
189 #define phys_ptr(arg)   ((__typeof__(arg)) ia64_tpa(arg))
190
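/* Instantiate the physical-mode wrappers; pointer arguments are converted with ia64_tpa(). */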
191 STUB_GET_TIME(phys, phys_ptr)
192 STUB_SET_TIME(phys, phys_ptr)
193 STUB_GET_WAKEUP_TIME(phys, phys_ptr)
194 STUB_SET_WAKEUP_TIME(phys, phys_ptr)
195 STUB_GET_VARIABLE(phys, phys_ptr)
196 STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
197 STUB_SET_VARIABLE(phys, phys_ptr)
198 STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
199 STUB_RESET_SYSTEM(phys, phys_ptr)
200
201 #define id(arg) arg
202
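/* Instantiate the virtual-mode wrappers; pointer arguments are passed through unchanged. */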
203 STUB_GET_TIME(virt, id)
204 STUB_SET_TIME(virt, id)
205 STUB_GET_WAKEUP_TIME(virt, id)
206 STUB_SET_WAKEUP_TIME(virt, id)
207 STUB_GET_VARIABLE(virt, id)
208 STUB_GET_NEXT_VARIABLE(virt, id)
209 STUB_SET_VARIABLE(virt, id)
210 STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
211 STUB_RESET_SYSTEM(virt, id)
212
213 void
214 efi_gettimeofday (struct timespec *ts)
215 {
216         efi_time_t tm;
217
218         memset(ts, 0, sizeof(*ts));
219         if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
220                 return;
221
222         ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
223         ts->tv_nsec = tm.nanosecond;
224 }
225
226 static int
227 is_available_memory (efi_memory_desc_t *md)
228 {
229         if (!(md->attribute & EFI_MEMORY_WB))
230                 return 0;
231
232         switch (md->type) {
233               case EFI_LOADER_CODE:
234               case EFI_LOADER_DATA:
235               case EFI_BOOT_SERVICES_CODE:
236               case EFI_BOOT_SERVICES_DATA:
237               case EFI_CONVENTIONAL_MEMORY:
238                 return 1;
239         }
240         return 0;
241 }
242
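/*
 * Kernel's trimmed-down view of the EFI memory map: contiguous WB (and UC)
 * ranges usable by the OS, terminated by an entry with start == ~0UL.
 */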
243 typedef struct kern_memdesc {
244         u64 attribute;
245         u64 start;
246         u64 num_pages;
247 } kern_memdesc_t;
248
249 static kern_memdesc_t *kern_memmap;
250
251 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
252
253 static inline u64
254 kmd_end(kern_memdesc_t *kmd)
255 {
256         return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
257 }
258
259 static inline u64
260 efi_md_end(efi_memory_desc_t *md)
261 {
262         return (md->phys_addr + efi_md_size(md));
263 }
264
265 static inline int
266 efi_wb(efi_memory_desc_t *md)
267 {
268         return (md->attribute & EFI_MEMORY_WB);
269 }
270
271 static inline int
272 efi_uc(efi_memory_desc_t *md)
273 {
274         return (md->attribute & EFI_MEMORY_UC);
275 }
276
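/*
 * Call CALLBACK once for each kern_memmap entry with the requested
 * attribute, translating the physical range into the matching kernel
 * virtual window (cached or uncached) and rounding it inward to page
 * boundaries.
 */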
277 static void
278 walk (efi_freemem_callback_t callback, void *arg, u64 attr)
279 {
280         kern_memdesc_t *k;
281         u64 start, end, voff;
282
283         voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
284         for (k = kern_memmap; k->start != ~0UL; k++) {
285                 if (k->attribute != attr)
286                         continue;
287                 start = PAGE_ALIGN(k->start);
288                 end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
289                 if (start < end)
290                         if ((*callback)(start + voff, end + voff, arg) < 0)
291                                 return;
292         }
293 }
294
295 /*
296  * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
297  * has memory that is available for OS use.
298  */
299 void
300 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
301 {
302         walk(callback, arg, EFI_MEMORY_WB);
303 }
304
305 /*
306  * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
307  * has memory that is available for uncached allocator.
308  */
309 void
310 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
311 {
312         walk(callback, arg, EFI_MEMORY_UC);
313 }
314
315 /*
316  * Look for the PAL_CODE region reported by EFI and map it using an
317  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
318  * Abstraction Layer chapter 11 in ADAG.
319  */
320
321 void *
322 efi_get_pal_addr (void)
323 {
324         void *efi_map_start, *efi_map_end, *p;
325         efi_memory_desc_t *md;
326         u64 efi_desc_size;
327         int pal_code_count = 0;
328         u64 vaddr, mask;
329
330         efi_map_start = __va(ia64_boot_param->efi_memmap);
331         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
332         efi_desc_size = ia64_boot_param->efi_memdesc_size;
333
334         for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
335                 md = p;
336                 if (md->type != EFI_PAL_CODE)
337                         continue;
338
339                 if (++pal_code_count > 1) {
340                         printk(KERN_ERR "Too many EFI PAL code memory ranges, dropped @ %lx\n",
341                                md->phys_addr);
342                         continue;
343                 }
344                 /*
345                  * The only ITLB entry in region 7 that is used is the one installed by
346                  * __start().  That entry covers a 64MB range.
347                  */
348                 mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
349                 vaddr = PAGE_OFFSET + md->phys_addr;
350
351                 /*
352                  * We must check that the PAL mapping won't overlap with the kernel
353                  * mapping.
354                  *
355                  * PAL code is guaranteed to be aligned on a power of 2 between 4KB and
356                  * 256KB, so only one ITR is needed to map it.  This implies that the
357                  * PAL code is always aligned on its size, i.e., the closest matching page
358                  * size supported by the TLB.  Therefore PAL code is guaranteed never to
359                  * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
360                  * So for now the following test is enough to determine whether or not we
361                  * need a dedicated ITR for the PAL code.
362                  */
363                 if ((vaddr & mask) == (KERNEL_START & mask)) {
364                         printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
365                                __FUNCTION__);
366                         continue;
367                 }
368
369                 if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
370                         panic("Whoa!  PAL code size bigger than a granule!");
371
372 #if EFI_DEBUG
373                 mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
374
375                 printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
376                         smp_processor_id(), md->phys_addr,
377                         md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
378                         vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
379 #endif
380                 return __va(md->phys_addr);
381         }
382         printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
383                __FUNCTION__);
384         return NULL;
385 }
386
387 void
388 efi_map_pal_code (void)
389 {
390         void *pal_vaddr = efi_get_pal_addr ();
391         u64 psr;
392
393         if (!pal_vaddr)
394                 return;
395
396         /*
397          * Cannot write to CRx with PSR.ic=1
398          */
399         psr = ia64_clear_ic();
400         ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
401                  pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
402                  IA64_GRANULE_SHIFT);
403         ia64_set_psr(psr);              /* restore psr */
404         ia64_srlz_i();
405 }
406
407 void __init
408 efi_init (void)
409 {
410         void *efi_map_start, *efi_map_end;
411         efi_config_table_t *config_tables;
412         efi_char16_t *c16;
413         u64 efi_desc_size;
414         char *cp, vendor[100] = "unknown";
415         extern char saved_command_line[];
416         int i;
417
418         /* it's too early to be able to use the standard kernel command line support... */
419         for (cp = saved_command_line; *cp; ) {
420                 if (memcmp(cp, "mem=", 4) == 0) {
421                         mem_limit = memparse(cp + 4, &cp);
422                 } else if (memcmp(cp, "max_addr=", 9) == 0) {
423                         max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
424                 } else {
425                         while (*cp != ' ' && *cp)
426                                 ++cp;
427                         while (*cp == ' ')
428                                 ++cp;
429                 }
430         }
431         if (max_addr != ~0UL)
432                 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
433
434         efi.systab = __va(ia64_boot_param->efi_systab);
435
436         /*
437          * Verify the EFI Table
438          */
439         if (efi.systab == NULL)
440                 panic("Whoa! Can't find EFI system table.\n");
441         if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
442                 panic("Whoa! EFI system table signature incorrect\n");
443         if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
444                 printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
445                        "got %d.%02d, expected %d.%02d\n",
446                        efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
447                        EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);
448
449         config_tables = __va(efi.systab->tables);
450
451         /* Show what we know for posterity */
452         c16 = __va(efi.systab->fw_vendor);
453         if (c16) {
454                 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
455                         vendor[i] = *c16++;
456                 vendor[i] = '\0';
457         }
458
459         printk(KERN_INFO "EFI v%u.%.02u by %s:",
460                efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
461
462         efi.mps        = EFI_INVALID_TABLE_ADDR;
463         efi.acpi       = EFI_INVALID_TABLE_ADDR;
464         efi.acpi20     = EFI_INVALID_TABLE_ADDR;
465         efi.smbios     = EFI_INVALID_TABLE_ADDR;
466         efi.sal_systab = EFI_INVALID_TABLE_ADDR;
467         efi.boot_info  = EFI_INVALID_TABLE_ADDR;
468         efi.hcdp       = EFI_INVALID_TABLE_ADDR;
469         efi.uga        = EFI_INVALID_TABLE_ADDR;
470
471         for (i = 0; i < (int) efi.systab->nr_tables; i++) {
472                 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
473                         efi.mps = config_tables[i].table;
474                         printk(" MPS=0x%lx", config_tables[i].table);
475                 } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
476                         efi.acpi20 = config_tables[i].table;
477                         printk(" ACPI 2.0=0x%lx", config_tables[i].table);
478                 } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
479                         efi.acpi = config_tables[i].table;
480                         printk(" ACPI=0x%lx", config_tables[i].table);
481                 } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
482                         efi.smbios = config_tables[i].table;
483                         printk(" SMBIOS=0x%lx", config_tables[i].table);
484                 } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
485                         efi.sal_systab = config_tables[i].table;
486                         printk(" SALsystab=0x%lx", config_tables[i].table);
487                 } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
488                         efi.hcdp = config_tables[i].table;
489                         printk(" HCDP=0x%lx", config_tables[i].table);
490                 }
491         }
492         printk("\n");
493
494         runtime = __va(efi.systab->runtime);
495         efi.get_time = phys_get_time;
496         efi.set_time = phys_set_time;
497         efi.get_wakeup_time = phys_get_wakeup_time;
498         efi.set_wakeup_time = phys_set_wakeup_time;
499         efi.get_variable = phys_get_variable;
500         efi.get_next_variable = phys_get_next_variable;
501         efi.set_variable = phys_set_variable;
502         efi.get_next_high_mono_count = phys_get_next_high_mono_count;
503         efi.reset_system = phys_reset_system;
504
505         efi_map_start = __va(ia64_boot_param->efi_memmap);
506         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
507         efi_desc_size = ia64_boot_param->efi_memdesc_size;
508
509 #if EFI_DEBUG
510         /* print EFI memory map: */
511         {
512                 efi_memory_desc_t *md;
513                 void *p;
514
515                 for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
516                         md = p;
517                         printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
518                                i, md->type, md->attribute, md->phys_addr,
519                                md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
520                                md->num_pages >> (20 - EFI_PAGE_SHIFT));
521                 }
522         }
523 #endif
524
525         efi_map_pal_code();
526         efi_enter_virtual_mode();
527 }
528
529 void
530 efi_enter_virtual_mode (void)
531 {
532         void *efi_map_start, *efi_map_end, *p;
533         efi_memory_desc_t *md;
534         efi_status_t status;
535         u64 efi_desc_size;
536
537         efi_map_start = __va(ia64_boot_param->efi_memmap);
538         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
539         efi_desc_size = ia64_boot_param->efi_memdesc_size;
540
541         for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
542                 md = p;
543                 if (md->attribute & EFI_MEMORY_RUNTIME) {
544                         /*
545                          * Some descriptors have multiple bits set, so the order of
546                          * the tests is relevant.
547                          */
548                         if (md->attribute & EFI_MEMORY_WB) {
549                                 md->virt_addr = (u64) __va(md->phys_addr);
550                         } else if (md->attribute & EFI_MEMORY_UC) {
551                                 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
552                         } else if (md->attribute & EFI_MEMORY_WC) {
553 #if 0
554                                 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
555                                                                            | _PAGE_D
556                                                                            | _PAGE_MA_WC
557                                                                            | _PAGE_PL_0
558                                                                            | _PAGE_AR_RW));
559 #else
560                                 printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
561                                 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
562 #endif
563                         } else if (md->attribute & EFI_MEMORY_WT) {
564 #if 0
565                                 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
566                                                                            | _PAGE_D | _PAGE_MA_WT
567                                                                            | _PAGE_PL_0
568                                                                            | _PAGE_AR_RW));
569 #else
570                                 printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
571                                 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
572 #endif
573                         }
574                 }
575         }
576
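        /*
         * SetVirtualAddressMap() must be called while EFI is still in
         * physical mode and may only be called once; afterwards the
         * firmware expects the virtual mappings recorded above.
         */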
577         status = efi_call_phys(__va(runtime->set_virtual_address_map),
578                                ia64_boot_param->efi_memmap_size,
579                                efi_desc_size, ia64_boot_param->efi_memdesc_version,
580                                ia64_boot_param->efi_memmap);
581         if (status != EFI_SUCCESS) {
582                 printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
583                        "(status=%lu)\n", status);
584                 return;
585         }
586
587         /*
588          * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
589          */
590         efi.get_time = virt_get_time;
591         efi.set_time = virt_set_time;
592         efi.get_wakeup_time = virt_get_wakeup_time;
593         efi.set_wakeup_time = virt_set_wakeup_time;
594         efi.get_variable = virt_get_variable;
595         efi.get_next_variable = virt_get_next_variable;
596         efi.set_variable = virt_set_variable;
597         efi.get_next_high_mono_count = virt_get_next_high_mono_count;
598         efi.reset_system = virt_reset_system;
599 }
600
601 /*
602  * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
603  * this type; other I/O port ranges should be described via ACPI.
604  */
605 u64
606 efi_get_iobase (void)
607 {
608         void *efi_map_start, *efi_map_end, *p;
609         efi_memory_desc_t *md;
610         u64 efi_desc_size;
611
612         efi_map_start = __va(ia64_boot_param->efi_memmap);
613         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
614         efi_desc_size = ia64_boot_param->efi_memdesc_size;
615
616         for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
617                 md = p;
618                 if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
619                         if (md->attribute & EFI_MEMORY_UC)
620                                 return md->phys_addr;
621                 }
622         }
623         return 0;
624 }
625
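/*
 * Find the kern_memmap entry containing phys_addr, if any.  The unsigned
 * subtraction below also rejects addresses below md->start.
 */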
626 static struct kern_memdesc *
627 kern_memory_descriptor (unsigned long phys_addr)
628 {
629         struct kern_memdesc *md;
630
631         for (md = kern_memmap; md->start != ~0UL; md++) {
632                 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
633                          return md;
634         }
635         return NULL;
636 }
637
638 static efi_memory_desc_t *
639 efi_memory_descriptor (unsigned long phys_addr)
640 {
641         void *efi_map_start, *efi_map_end, *p;
642         efi_memory_desc_t *md;
643         u64 efi_desc_size;
644
645         efi_map_start = __va(ia64_boot_param->efi_memmap);
646         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
647         efi_desc_size = ia64_boot_param->efi_memdesc_size;
648
649         for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
650                 md = p;
651
652                 if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
653                          return md;
654         }
655         return NULL;
656 }
657
658 u32
659 efi_mem_type (unsigned long phys_addr)
660 {
661         efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
662
663         if (md)
664                 return md->type;
665         return 0;
666 }
667
668 u64
669 efi_mem_attributes (unsigned long phys_addr)
670 {
671         efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
672
673         if (md)
674                 return md->attribute;
675         return 0;
676 }
677 EXPORT_SYMBOL(efi_mem_attributes);
678
679 u64
680 efi_mem_attribute (unsigned long phys_addr, unsigned long size)
681 {
682         unsigned long end = phys_addr + size;
683         efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
684         u64 attr;
685
686         if (!md)
687                 return 0;
688
689         /*
690          * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
691          * the kernel that firmware needs this region mapped.
692          */
693         attr = md->attribute & ~EFI_MEMORY_RUNTIME;
694         do {
695                 unsigned long md_end = efi_md_end(md);
696
697                 if (end <= md_end)
698                         return attr;
699
700                 md = efi_memory_descriptor(md_end);
701                 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
702                         return 0;
703         } while (md);
704         return 0;
705 }
706
707 u64
708 kern_mem_attribute (unsigned long phys_addr, unsigned long size)
709 {
710         unsigned long end = phys_addr + size;
711         struct kern_memdesc *md;
712         u64 attr;
713
714         /*
715          * This is a hack for ioremap calls before we set up kern_memmap.
716          * Maybe we should do efi_memmap_init() earlier instead.
717          */
718         if (!kern_memmap) {
719                 attr = efi_mem_attribute(phys_addr, size);
720                 if (attr & EFI_MEMORY_WB)
721                         return EFI_MEMORY_WB;
722                 return 0;
723         }
724
725         md = kern_memory_descriptor(phys_addr);
726         if (!md)
727                 return 0;
728
729         attr = md->attribute;
730         do {
731                 unsigned long md_end = kmd_end(md);
732
733                 if (end <= md_end)
734                         return attr;
735
736                 md = kern_memory_descriptor(md_end);
737                 if (!md || md->attribute != attr)
738                         return 0;
739         } while (md);
740         return 0;
741 }
742 EXPORT_SYMBOL(kern_mem_attribute);
743
744 int
745 valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
746 {
747         u64 attr;
748
749         /*
750          * /dev/mem reads and writes use copy_to_user(), which implicitly
751          * uses a granule-sized kernel identity mapping.  It's really
752          * only safe to do this for regions in kern_memmap.  For more
753          * details, see Documentation/ia64/aliasing.txt.
754          */
755         attr = kern_mem_attribute(phys_addr, size);
756         if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
757                 return 1;
758         return 0;
759 }
760
761 int
762 valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
763 {
764         /*
765          * MMIO regions are often missing from the EFI memory map.
766          * We must allow mmap of them for programs like X, so we
767          * currently can't do any useful validation.
768          */
769         return 1;
770 }
771
772 pgprot_t
773 phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
774                      pgprot_t vma_prot)
775 {
776         unsigned long phys_addr = pfn << PAGE_SHIFT;
777         u64 attr;
778
779         /*
780          * For /dev/mem mmap, we use user mappings, but if the region is
781          * in kern_memmap (and hence may be covered by a kernel mapping),
782          * we must use the same attribute as the kernel mapping.
783          */
784         attr = kern_mem_attribute(phys_addr, size);
785         if (attr & EFI_MEMORY_WB)
786                 return pgprot_cacheable(vma_prot);
787         else if (attr & EFI_MEMORY_UC)
788                 return pgprot_noncached(vma_prot);
789
790         /*
791          * Some chipsets don't support UC access to memory.  If
792          * WB is supported, we prefer that.
793          */
794         if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
795                 return pgprot_cacheable(vma_prot);
796
797         return pgprot_noncached(vma_prot);
798 }
799
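/*
 * Parse the EFI "ConOut" variable and return 1 only if every console
 * output device path goes through a UART, i.e. the console is serial-only.
 */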
800 int __init
801 efi_uart_console_only(void)
802 {
803         efi_status_t status;
804         char *s, name[] = "ConOut";
805         efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
806         efi_char16_t *utf16, name_utf16[32];
807         unsigned char data[1024];
808         unsigned long size = sizeof(data);
809         struct efi_generic_dev_path *hdr, *end_addr;
810         int uart = 0;
811
812         /* Convert to UTF-16 */
813         utf16 = name_utf16;
814         s = name;
815         while (*s)
816                 *utf16++ = *s++ & 0x7f;
817         *utf16 = 0;
818
819         status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
820         if (status != EFI_SUCCESS) {
821                 printk(KERN_ERR "No EFI %s variable?\n", name);
822                 return 0;
823         }
824
825         hdr = (struct efi_generic_dev_path *) data;
826         end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
827         while (hdr < end_addr) {
828                 if (hdr->type == EFI_DEV_MSG &&
829                     hdr->sub_type == EFI_DEV_MSG_UART)
830                         uart = 1;
831                 else if (hdr->type == EFI_DEV_END_PATH ||
832                           hdr->type == EFI_DEV_END_PATH2) {
833                         if (!uart)
834                                 return 0;
835                         if (hdr->sub_type == EFI_DEV_END_ENTIRE)
836                                 return 1;
837                         uart = 0;
838                 }
839                 hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
840         }
841         printk(KERN_ERR "Malformed %s value\n", name);
842         return 0;
843 }
844
845 /*
846  * Look for the first granule-aligned memory descriptor that is big
847  * enough to hold the EFI memory map.  Make sure this descriptor is
848  * at least granule sized so it does not get trimmed.
849  */
850 struct kern_memdesc *
851 find_memmap_space (void)
852 {
853         u64     contig_low=0, contig_high=0;
854         u64     as = 0, ae;
855         void *efi_map_start, *efi_map_end, *p, *q;
856         efi_memory_desc_t *md, *pmd = NULL, *check_md;
857         u64     space_needed, efi_desc_size;
858         unsigned long total_mem = 0;
859
860         efi_map_start = __va(ia64_boot_param->efi_memmap);
861         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
862         efi_desc_size = ia64_boot_param->efi_memdesc_size;
863
864         /*
865          * Worst case: we need 3 kernel descriptors for each efi descriptor
866          * (if every entry has a WB part in the middle, and UC head and tail),
867          * plus one for the end marker.
868          */
869         space_needed = sizeof(kern_memdesc_t) *
870                 (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
871
872         for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
873                 md = p;
874                 if (!efi_wb(md)) {
875                         continue;
876                 }
877                 if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
878                         contig_low = GRANULEROUNDUP(md->phys_addr);
879                         contig_high = efi_md_end(md);
880                         for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
881                                 check_md = q;
882                                 if (!efi_wb(check_md))
883                                         break;
884                                 if (contig_high != check_md->phys_addr)
885                                         break;
886                                 contig_high = efi_md_end(check_md);
887                         }
888                         contig_high = GRANULEROUNDDOWN(contig_high);
889                 }
890                 if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
891                         continue;
892
893                 /* Round ends inward to granule boundaries */
894                 as = max(contig_low, md->phys_addr);
895                 ae = min(contig_high, efi_md_end(md));
896
897                 /* keep within max_addr= command line arg */
898                 ae = min(ae, max_addr);
899                 if (ae <= as)
900                         continue;
901
902                 /* avoid going over mem= command line arg */
903                 if (total_mem + (ae - as) > mem_limit)
904                         ae -= total_mem + (ae - as) - mem_limit;
905
906                 if (ae <= as)
907                         continue;
908
909                 if (ae - as > space_needed)
910                         break;
911         }
912         if (p >= efi_map_end)
913                 panic("Can't allocate space for kernel memory descriptors");
914
915         return __va(as);
916 }
917
918 /*
919  * Walk the EFI memory map and gather all memory available for the
920  * kernel to use.  We can allocate partial granules only if the
921  * unavailable parts exist and are WB.
922  */
923 void
924 efi_memmap_init(unsigned long *s, unsigned long *e)
925 {
926         struct kern_memdesc *k, *prev = NULL;
927         u64     contig_low=0, contig_high=0;
928         u64     as, ae, lim;
929         void *efi_map_start, *efi_map_end, *p, *q;
930         efi_memory_desc_t *md, *pmd = NULL, *check_md;
931         u64     efi_desc_size;
932         unsigned long total_mem = 0;
933
934         k = kern_memmap = find_memmap_space();
935
936         efi_map_start = __va(ia64_boot_param->efi_memmap);
937         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
938         efi_desc_size = ia64_boot_param->efi_memdesc_size;
939
940         for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
941                 md = p;
942                 if (!efi_wb(md)) {
943                         if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
944                                            md->type == EFI_BOOT_SERVICES_DATA)) {
945                                 k->attribute = EFI_MEMORY_UC;
946                                 k->start = md->phys_addr;
947                                 k->num_pages = md->num_pages;
948                                 k++;
949                         }
950                         continue;
951                 }
952                 if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
953                         contig_low = GRANULEROUNDUP(md->phys_addr);
954                         contig_high = efi_md_end(md);
955                         for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
956                                 check_md = q;
957                                 if (!efi_wb(check_md))
958                                         break;
959                                 if (contig_high != check_md->phys_addr)
960                                         break;
961                                 contig_high = efi_md_end(check_md);
962                         }
963                         contig_high = GRANULEROUNDDOWN(contig_high);
964                 }
965                 if (!is_available_memory(md))
966                         continue;
967
968                 /*
969                  * Round ends inward to granule boundaries.
970                  * Give trimmings to the uncached allocator.
971                  */
972                 if (md->phys_addr < contig_low) {
973                         lim = min(efi_md_end(md), contig_low);
974                         if (efi_uc(md)) {
975                                 if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
976                                     kmd_end(k-1) == md->phys_addr) {
977                                         (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
978                                 } else {
979                                         k->attribute = EFI_MEMORY_UC;
980                                         k->start = md->phys_addr;
981                                         k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
982                                         k++;
983                                 }
984                         }
985                         as = contig_low;
986                 } else
987                         as = md->phys_addr;
988
989                 if (efi_md_end(md) > contig_high) {
990                         lim = max(md->phys_addr, contig_high);
991                         if (efi_uc(md)) {
992                                 if (lim == md->phys_addr && k > kern_memmap &&
993                                     (k-1)->attribute == EFI_MEMORY_UC &&
994                                     kmd_end(k-1) == md->phys_addr) {
995                                         (k-1)->num_pages += md->num_pages;
996                                 } else {
997                                         k->attribute = EFI_MEMORY_UC;
998                                         k->start = lim;
999                                         k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
1000                                         k++;
1001                                 }
1002                         }
1003                         ae = contig_high;
1004                 } else
1005                         ae = efi_md_end(md);
1006
1007                 /* keep within max_addr= command line arg */
1008                 ae = min(ae, max_addr);
1009                 if (ae <= as)
1010                         continue;
1011
1012                 /* avoid going over mem= command line arg */
1013                 if (total_mem + (ae - as) > mem_limit)
1014                         ae -= total_mem + (ae - as) - mem_limit;
1015
1016                 if (ae <= as)
1017                         continue;
1018                 if (prev && kmd_end(prev) == md->phys_addr) {
1019                         prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
1020                         total_mem += ae - as;
1021                         continue;
1022                 }
1023                 k->attribute = EFI_MEMORY_WB;
1024                 k->start = as;
1025                 k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
1026                 total_mem += ae - as;
1027                 prev = k++;
1028         }
1029         k->start = ~0L; /* end-marker */
1030
1031         /* reserve the memory we are using for kern_memmap */
1032         *s = (u64)kern_memmap;
1033         *e = (u64)++k;
1034 }
1035
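/*
 * Register the EFI memory map entries (except MMIO ranges) with the
 * iomem resource tree so they show up in /proc/iomem.
 */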
1036 void
1037 efi_initialize_iomem_resources(struct resource *code_resource,
1038                                struct resource *data_resource)
1039 {
1040         struct resource *res;
1041         void *efi_map_start, *efi_map_end, *p;
1042         efi_memory_desc_t *md;
1043         u64 efi_desc_size;
1044         char *name;
1045         unsigned long flags;
1046
1047         efi_map_start = __va(ia64_boot_param->efi_memmap);
1048         efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
1049         efi_desc_size = ia64_boot_param->efi_memdesc_size;
1050
1051         res = NULL;
1052
1053         for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1054                 md = p;
1055
1056                 if (md->num_pages == 0) /* should not happen */
1057                         continue;
1058
1059                 flags = IORESOURCE_MEM;
1060                 switch (md->type) {
1061
1062                         case EFI_MEMORY_MAPPED_IO:
1063                         case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
1064                                 continue;
1065
1066                         case EFI_LOADER_CODE:
1067                         case EFI_LOADER_DATA:
1068                         case EFI_BOOT_SERVICES_DATA:
1069                         case EFI_BOOT_SERVICES_CODE:
1070                         case EFI_CONVENTIONAL_MEMORY:
1071                                 if (md->attribute & EFI_MEMORY_WP) {
1072                                         name = "System ROM";
1073                                         flags |= IORESOURCE_READONLY;
1074                                 } else {
1075                                         name = "System RAM";
1076                                 }
1077                                 break;
1078
1079                         case EFI_ACPI_MEMORY_NVS:
1080                                 name = "ACPI Non-volatile Storage";
1081                                 flags |= IORESOURCE_BUSY;
1082                                 break;
1083
1084                         case EFI_UNUSABLE_MEMORY:
1085                                 name = "reserved";
1086                                 flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
1087                                 break;
1088
1089                         case EFI_RESERVED_TYPE:
1090                         case EFI_RUNTIME_SERVICES_CODE:
1091                         case EFI_RUNTIME_SERVICES_DATA:
1092                         case EFI_ACPI_RECLAIM_MEMORY:
1093                         default:
1094                                 name = "reserved";
1095                                 flags |= IORESOURCE_BUSY;
1096                                 break;
1097                 }
1098
1099                 if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
1100                         printk(KERN_ERR "failed to allocate resource for iomem\n");
1101                         return;
1102                 }
1103
1104                 res->name = name;
1105                 res->start = md->phys_addr;
1106                 res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
1107                 res->flags = flags;
1108
1109                 if (insert_resource(&iomem_resource, res) < 0)
1110                         kfree(res);
1111                 else {
1112                         /*
1113                          * We don't know which region contains
1114                          * kernel data so we try it repeatedly and
1115                          * let the resource manager test it.
1116                          */
1117                         insert_resource(res, code_resource);
1118                         insert_resource(res, data_resource);
1119                 }
1120         }
1121 }