/*
 * linux/arch/alpha/kernel/setup.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/config.h>	/* CONFIG_ALPHA_LCA etc */
#include <linux/mc146818rtc.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/eisa.h>
#include <linux/pfn.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#include <linux/reboot.h>
#endif
#include <linux/notifier.h>
#include <asm/setup.h>
extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
	.notifier_call	= alpha_panic_event,
	.priority	= INT_MAX,	/* try to do it first */
};
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/hwrpb.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
struct hwrpb_struct *hwrpb;
unsigned long srm_hae;

int alpha_l1i_cacheshape;
int alpha_l1d_cacheshape;
int alpha_l2_cacheshape;
int alpha_l3_cacheshape;
#ifdef CONFIG_VERBOSE_MCHECK
/* 0=minimum, 1=verbose, 2=all */
/* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif
/* Which processor we booted from. */
int boot_cpuid;
/*
 * Using SRM callbacks for initial console output. This works from
 * setup_arch() time through the end of time_init(), as those places
 * are under our (Alpha) control.
 *
 * "srmcons" specified in the boot command arguments allows us to
 * see kernel messages during the period of time before the true
 * console device is "registered" during console_init().
 * As of this version (2.5.59), console_init() will call
 * disable_early_printk() as the last action before initializing
 * the console drivers. That's the last possible time srmcons can be
 * unregistered without interfering with console behavior.
 *
 * By default, OFF; set it with a boot command arg of "srmcons" or
 * "console=srm". The meaning of these two args is:
 *     "srmcons"     - early callback prints
 *     "console=srm" - full callback based console, including early prints
 */
int srmcons_output = 0;
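/* Judging from the "srmcons_output & 2" test in setup_arch() below, bit 0
   appears to track "srmcons" (early callback prints only) and bit 1
   "console=srm" (full callback based console). */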
/* Enforce a memory size limit; useful for testing. By default, none. */
unsigned long mem_size_limit = 0;

/* Set AGP GART window size (0 means disabled). */
unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
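/* Overridden by the "gartsize=" boot argument, parsed in setup_arch(). */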
#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
#endif

#define N(a) (sizeof(a)/sizeof(a[0]))
static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
					       unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
static void get_sysnames(unsigned long, unsigned long, unsigned long,
			 char **, char **);
static void determine_cpu_caches (unsigned int);

static char command_line[COMMAND_LINE_SIZE];
/*
 * The format of "screen_info" is strange, and due to early
 * i386-setup code. This is just enough to make the console
 * code think we're on a VGA color display.
 */

struct screen_info screen_info = {
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
/*
 * The direct map I/O window, if any. This should be the same
 * for all busses, since it's used by virt_to_bus.
 */

unsigned long __direct_map_base;
unsigned long __direct_map_size;
/*
 * Declare all of the machine vectors.
 */

/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
   __attribute__((weak)) or #pragma weak. Bypass it and talk directly
   to the assembler. */

#define WEAK(X) \
	extern struct alpha_machine_vector X; \
	asm(".weak "#X)
WEAK(mikasa_primo_mv);
WEAK(noritake_primo_mv);
WEAK(sable_gamma_mv);
/*
 * I/O resources inherited from PeeCees. Except for perhaps the
 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
 *
 * ??? If this becomes less standard, move the struct out into the
 * machine vector.
 */

static void __init
reserve_std_resources(void)
{
	static struct resource standard_io_resources[] = {
		{ .name = "rtc", .start = -1, .end = -1 },
		{ .name = "dma1", .start = 0x00, .end = 0x1f },
		{ .name = "pic1", .start = 0x20, .end = 0x3f },
		{ .name = "timer", .start = 0x40, .end = 0x5f },
		{ .name = "keyboard", .start = 0x60, .end = 0x6f },
		{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
		{ .name = "pic2", .start = 0xa0, .end = 0xbf },
		{ .name = "dma2", .start = 0xc0, .end = 0xdf },
	};
	struct resource *io = &ioport_resource;
	struct pci_controller *hose;
	size_t i;

	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == 0) {
			io = hose->io_space;
			break;
		}
	/* Fix up for the Jensen's queer RTC placement. */
	standard_io_resources[0].start = RTC_PORT(0);
	standard_io_resources[0].end = RTC_PORT(0) + 0x10;

	for (i = 0; i < N(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
}
#define PFN_MAX		PFN_DOWN(0x80000000)
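/* Walk the memory-cluster array that the HWRPB's memory data descriptor
   table (mddt_offset) points at; used below to size and free the bootmem. */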
#define for_each_mem_cluster(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)
static unsigned long __init
get_mem_size_limit(char *s)
{
	unsigned long end = 0;
	char *from = s;

	end = simple_strtoul(from, &from, 0);
	if (*from == 'K' || *from == 'k') {
		end = end << 10;
	} else if (*from == 'M' || *from == 'm') {
		end = end << 20;
	} else if (*from == 'G' || *from == 'g') {
		end = end << 30;
	}
	return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
}
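/* For example, assuming Alpha's 8 KB pages (PAGE_SHIFT == 13), "mem=64M"
   yields (64 << 20) >> 13 = 8192, i.e. a limit of 8192 page frames. */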
#ifdef CONFIG_BLK_DEV_INITRD
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk("initrd moved to %p\n", start);
	return start;
}
#ifndef CONFIG_DISCONTIGMEM
static void __init
setup_memory(void *kernel_end)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long i;
	/* Find free clusters, and init and free the bootmem accordingly. */
	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);

	for_each_mem_cluster(memdesc, cluster, i) {
		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		/* Bit 0 is console/PALcode reserved. Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later. */
		if (cluster->usage & 3)
			continue;

		end = cluster->start_pfn + cluster->numpages;
		if (end > max_low_pfn)
			max_low_pfn = end;
	}
	/*
	 * Except for the NUMA systems (wildfire, marvel) all of the
	 * Alpha systems we run on support 32GB of memory or less.
	 * Since the NUMA systems introduce large holes in memory addressing,
	 * we can get into a situation where there is not enough contiguous
	 * memory for the memory map.
	 *
	 * Limit memory to the first 32GB to limit the NUMA systems to
	 * memory on their first node (wildfire) or 2 (marvel) to avoid
	 * not being able to produce the memory map. In order to access
	 * all of the memory on the NUMA systems, build with discontiguous
	 * memory support.
	 *
	 * If the user specified a memory limit, let that memory limit stand.
	 */
	if (!mem_size_limit)
		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;

	if (mem_size_limit && max_low_pfn >= mem_size_limit) {
		printk("setup: forcing memory size to %ldK (from %ldK).\n",
		       mem_size_limit << (PAGE_SHIFT - 10),
		       max_low_pfn << (PAGE_SHIFT - 10));
		max_low_pfn = mem_size_limit;
	}
	/* Find the bounds of kernel memory. */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

	if (max_low_pfn <= end_kernel_pfn)
		panic("not enough memory to boot");
	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap. */
	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);

	/* Now find a good region where to allocate the bootmap. */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	if (bootmap_start == ~0UL) {
	/* Allocate the bootmap and mark the whole MM as reserved. */
	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);

	/* Mark the free regions. */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem(PFN_PHYS(start),
					     (PFN_PHYS(start_kernel_pfn)
					      - PFN_PHYS(start)));
				printk("freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk("freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory. */
	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
	printk("reserving pages %ld:%ld\n",
	       bootmap_start, bootmap_start + PFN_UP(bootmap_size));
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		initrd_end = initrd_start + INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			reserve_bootmem(virt_to_phys((void *)initrd_start),
					INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
#else
extern void setup_memory(void *);
#endif /* !CONFIG_DISCONTIGMEM */
int __init
page_is_ram(unsigned long pfn)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long i;

	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);
	for_each_mem_cluster(memdesc, cluster, i)
		if (pfn >= cluster->start_pfn &&
		    pfn < cluster->start_pfn + cluster->numpages) {
			return (cluster->usage & 3) ? 0 : 1;
		}

	return 0;
}
void __init
setup_arch(char **cmdline_p)
{
	struct alpha_machine_vector *vec = NULL;
	struct percpu_struct *cpu;
	char *type_name, *var_name, *p;
	void *kernel_end = _end; /* end of kernel */
	char *args = command_line;

	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
	boot_cpuid = hard_smp_processor_id();
	/*
	 * Pre-process the system type to make sure it will be valid.
	 *
	 * This may restore real CABRIO and EB66+ family names, i.e.
	 * EB64+ and EB66.
	 *
	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
	 * and AS1200 (DIGITAL Server 5000 series) have the type as
	 * the negative of the real one.
	 */
	if ((long)hwrpb->sys_type < 0) {
		hwrpb->sys_type = -((long)hwrpb->sys_type);
		hwrpb_update_checksum(hwrpb);
	}
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &alpha_panic_block);
#ifdef CONFIG_ALPHA_GENERIC
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr. */
	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
#endif

	/* If we are using SRM, we want to allow callbacks
	   as early as possible, so do this NOW, and then
	   they should work immediately thereafter. */
	kernel_end = callback_init(kernel_end);
	/*
	 * Locate the command line.
	 */
	/* Hack for Jensen... since we're restricted to 8 or 16 chars for
	   boot flags depending on the boot mode, we need some shorthand.
	   This should do for installation. */
	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1",
			sizeof command_line);
	} else {
		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
	}
	strcpy(saved_command_line, command_line);
	*cmdline_p = command_line;
	/*
	 * Process command-line arguments.
	 */
	while ((p = strsep(&args, " \t")) != NULL) {
		if (strncmp(p, "alpha_mv=", 9) == 0) {
			vec = get_sysvec_byname(p+9);
			continue;
		}
		if (strncmp(p, "cycle=", 6) == 0) {
			est_cycle_freq = simple_strtol(p+6, NULL, 0);
			continue;
		}
		if (strncmp(p, "mem=", 4) == 0) {
			mem_size_limit = get_mem_size_limit(p+4);
			continue;
		}
		if (strncmp(p, "srmcons", 7) == 0) {
			srmcons_output |= 1;
			continue;
		}
		if (strncmp(p, "console=srm", 11) == 0) {
			srmcons_output |= 2;
			continue;
		}
		if (strncmp(p, "gartsize=", 9) == 0) {
			alpha_agpgart_size =
				get_mem_size_limit(p+9) << PAGE_SHIFT;
			continue;
		}
#ifdef CONFIG_VERBOSE_MCHECK
		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
			continue;
		}
#endif
	}
	/* Replace the command line, now that we've killed it with strsep. */
	strcpy(command_line, saved_command_line);

	/* If we want SRM console printk echoing early, do it now. */
	if (alpha_using_srm && srmcons_output) {
		register_srm_console();

		/*
		 * If "console=srm" was specified, clear the srmcons_output
		 * flag now so that time.c won't unregister_srm_console
		 */
		if (srmcons_output & 2)
			srmcons_output = 0;
	}
#ifdef CONFIG_MAGIC_SYSRQ
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot. */
	if (alpha_using_srm) {
		struct sysrq_key_op *op = __sysrq_get_key_op('b');
		op->handler = (void *) machine_halt;
	}
#endif
	/*
	 * Identify and reconfigure for the current system.
	 */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &type_name, &var_name);
	if (*var_name == '0')
		var_name = "";

	if (!vec)
		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
				 cpu->type);

	if (!vec)
		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
		      type_name, (*var_name ? " variation " : ""), var_name,
		      hwrpb->sys_type, hwrpb->sys_variation);
	if (vec != &alpha_mv) {
		alpha_mv = *vec;
	}

	printk("Booting "
#ifdef CONFIG_ALPHA_GENERIC
	       "GENERIC "
#endif
	       "on %s%s%s using machine vector %s from %s\n",
	       type_name, (*var_name ? " variation " : ""),
	       var_name, alpha_mv.vector_name,
	       (alpha_using_srm ? "SRM" : "MILO"));
	printk("Major Options: "
#ifdef CONFIG_ALPHA_EV56
	       "EV56 "
#endif
#ifdef CONFIG_ALPHA_EV67
	       "EV67 "
#endif
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
	       "LEGACY_START "
#endif
#ifdef CONFIG_VERBOSE_MCHECK
	       "VERBOSE_MCHECK "
#endif
#ifdef CONFIG_DISCONTIGMEM
	       "DISCONTIGMEM "
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	       "DEBUG_SPINLOCK "
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	       "MAGIC_SYSRQ "
#endif
	       "\n");

	printk("Command line: %s\n", command_line);
	/*
	 * Save the SRM's current value for restoration.
	 */
	srm_hae = *alpha_mv.hae_register;
	__set_hae(alpha_mv.hae_cache);

	/* Reset enable correctable error reports. */

	/* Find our memory. */
	setup_memory(kernel_end);
	/* First guess at cpu cache sizes. Do this before init_arch. */
	determine_cpu_caches(cpu->type);

	/* Initialize the machine. Usually has to do with setting up
	   DMA windows and the like. */
	if (alpha_mv.init_arch)
		alpha_mv.init_arch();

	/* Reserve standard resources. */
	reserve_std_resources();
	/*
	 * Give us a default console. TGA users will see nothing until
	 * chr_dev_init is called, rather late in the boot sequence.
	 */
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif

	/* Default root filesystem to sda2. */
	ROOT_DEV = Root_SDA2;
	/* FIXME: only set this when we actually have EISA in this box? */

	/*
	 * Check ASN in HWRPB for validity, report if bad.
	 * FIXME: how was this failing? Should we trust it instead,
	 * and copy the value into alpha_mv.max_asn?
	 */
	if (hwrpb->max_asn != MAX_ASN) {
		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
	}
	/*
	 * Identify the flock of penguins.
	 */
}
void __init
disable_early_printk(void)
{
	if (alpha_using_srm && srmcons_output) {
		unregister_srm_console();
		srmcons_output = 0;
	}
}
static char sys_unknown[] = "Unknown";

static char systype_names[][16] = {
	"0",
	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
};

static char unofficial_names[][8] = {"100", "Ruffian"};
static char api_names[][16] = {"200", "Nautilus"};
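/* Each *_names table lists the marketing names for one system family; the
   matching *_indices table maps the HWRPB variation "member" field (see
   get_sysvec() and get_sysnames() below) onto an entry in that table. */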
static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};

static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};

static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
static int eb64p_indices[] = {0,0,1,2};

static char eb66_names[][8] = {"EB66", "EB66+"};
static int eb66_indices[] = {0,0,1};

static char marvel_names[][16] = {

static int marvel_indices[] = { 0 };

static char rawhide_names[][16] = {
	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
};
static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};

static char titan_names[][16] = {
	"DEFAULT", "Privateer", "Falcon", "Granite"
};
static int titan_indices[] = {0,1,2,2,3};

static char tsunami_names[][16] = {
	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
	"Flying Clipper", "Shark"
};
static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
static struct alpha_machine_vector * __init
get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
{
	static struct alpha_machine_vector *systype_vecs[] __initdata =
		NULL,			/* Mannequin */
		NULL,			/* Sable -- see below. */
		NULL,			/* Turbolaser */
		NULL,			/* Alcor, Bret, Maverick. HWRPB inaccurate? */
		NULL,			/* Tradewind */
		NULL,			/* Mikasa -- see below. */
		NULL,			/* EB66 -- see variation. */
		NULL,			/* EB64+ -- see variation. */
		NULL,			/* EB164 -- see variation. */
		NULL,			/* Noritake -- see below. */
		NULL,			/* Tsunami -- see variation. */
		&wildfire_mv,		/* Wildfire */
		&eiger_mv,		/* Eiger */
	static struct alpha_machine_vector *unofficial_vecs[] __initdata =

	static struct alpha_machine_vector *api_vecs[] __initdata =

	static struct alpha_machine_vector *alcor_vecs[] __initdata =
	{
		&alcor_mv, &xlt_mv, &xlt_mv
	};

	static struct alpha_machine_vector *eb164_vecs[] __initdata =
	{
		&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
	};

	static struct alpha_machine_vector *eb64p_vecs[] __initdata =
		&cabriolet_mv /* AlphaPCI64 */

	static struct alpha_machine_vector *eb66_vecs[] __initdata =

	static struct alpha_machine_vector *marvel_vecs[] __initdata =
	static struct alpha_machine_vector *titan_vecs[] __initdata =
	{
		&titan_mv,		/* default */
		&privateer_mv,		/* privateer */
		&titan_mv,		/* falcon */
		&privateer_mv,		/* granite */
	};

	static struct alpha_machine_vector *tsunami_vecs[] __initdata =
	{
		NULL,
		&dp264_mv,		/* dp264 */
		&dp264_mv,		/* warhol */
		&dp264_mv,		/* windjammer */
		&monet_mv,		/* monet */
		&clipper_mv,		/* clipper */
		&dp264_mv,		/* goldrush */
		&webbrick_mv,		/* webbrick */
		&dp264_mv,		/* catamaran */
		NULL,			/* brisbane? */
		NULL,			/* melbourne? */
		NULL,			/* flying clipper? */
		&shark_mv,		/* shark */
	};
	/* ??? Do we need to distinguish between Rawhides? */

	struct alpha_machine_vector *vec;
	/* Search the system tables first... */
	vec = NULL;
	if (type < N(systype_vecs)) {
		vec = systype_vecs[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < N(api_vecs)) {
		vec = api_vecs[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
		vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
	}
	/* If we've not found one, try for a variation. */

	/* Member ID is a bit-field. */
	unsigned long member = (variation >> 10) & 0x3f;

	cpu &= 0xffffffff; /* make it usable */
	if (member < N(alcor_indices))
		vec = alcor_vecs[alcor_indices[member]];

	if (member < N(eb164_indices))
		vec = eb164_vecs[eb164_indices[member]];
	/* PC164 may show as EB164 variation with EV56 CPU,
	   but, since no true EB164 had anything but EV5... */
	if (vec == &eb164_mv && cpu == EV56_CPU)
		vec = &pc164_mv;

	if (member < N(eb64p_indices))
		vec = eb64p_vecs[eb64p_indices[member]];

	if (member < N(eb66_indices))
		vec = eb66_vecs[eb66_indices[member]];

	if (member < N(marvel_indices))
		vec = marvel_vecs[marvel_indices[member]];

	vec = titan_vecs[0];	/* default */
	if (member < N(titan_indices))
		vec = titan_vecs[titan_indices[member]];

	if (member < N(tsunami_indices))
		vec = tsunami_vecs[tsunami_indices[member]];

	if (cpu == EV5_CPU || cpu == EV56_CPU)
		vec = &mikasa_primo_mv;

	case ST_DEC_NORITAKE:
		if (cpu == EV5_CPU || cpu == EV56_CPU)
			vec = &noritake_primo_mv;

	case ST_DEC_2100_A500:
		if (cpu == EV5_CPU || cpu == EV56_CPU)
			vec = &sable_gamma_mv;

	return vec;
}
static struct alpha_machine_vector * __init
get_sysvec_byname(const char *name)
{
	static struct alpha_machine_vector *all_vecs[] __initdata =
	{
		/* ... the full list of machine vectors, one per platform ... */
	};

	size_t i;
	for (i = 0; i < N(all_vecs); ++i) {
		struct alpha_machine_vector *mv = all_vecs[i];
		if (strcasecmp(mv->vector_name, name) == 0)
			return mv;
	}
	return NULL;
}
static void __init
get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
	     char **type_name, char **variation_name)
{
	unsigned long member;

	/* If not in the tables, make it UNKNOWN,
	   else set type name to family */
	if (type < N(systype_names)) {
		*type_name = systype_names[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < N(api_names)) {
		*type_name = api_names[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
		*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
	} else {
		*type_name = sys_unknown;
		*variation_name = sys_unknown;
		return;
	}

	/* Set variation to "0"; if variation is zero, done. */
	*variation_name = systype_names[0];
	if (variation == 0) {
		return;
	}

	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */

	cpu &= 0xffffffff; /* make it usable */
	switch (type) { /* select by family */
	default: /* default to variation "0" for now */

		if (member < N(eb164_indices))
			*variation_name = eb164_names[eb164_indices[member]];
		/* PC164 may show as EB164 variation, but with EV56 CPU,
		   so, since no true EB164 had anything but EV5... */
		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
			*variation_name = eb164_names[1]; /* make it PC164 */

		if (member < N(alcor_indices))
			*variation_name = alcor_names[alcor_indices[member]];

		if (member < N(eb64p_indices))
			*variation_name = eb64p_names[eb64p_indices[member]];

		if (member < N(eb66_indices))
			*variation_name = eb66_names[eb66_indices[member]];

		if (member < N(marvel_indices))
			*variation_name = marvel_names[marvel_indices[member]];

	case ST_DEC_RAWHIDE:
		if (member < N(rawhide_indices))
			*variation_name = rawhide_names[rawhide_indices[member]];

		*variation_name = titan_names[0];	/* default */
		if (member < N(titan_indices))
			*variation_name = titan_names[titan_indices[member]];

	case ST_DEC_TSUNAMI:
		if (member < N(tsunami_indices))
			*variation_name = tsunami_names[tsunami_indices[member]];
	}
}
/*
 * A change was made to the HWRPB via an ECO and the following code
 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
 * was not implemented in the console firmware. If it's revision 5 or
 * greater we can get the name of the platform as an ASCII string from
 * the HWRPB. That's what this function does. It checks the revision
 * level and if the string is in the HWRPB it returns the address of
 * the string--a pointer to the name of the platform.
 *
 * Returns:
 * - Pointer to an ASCII string if it's in the HWRPB
 * - Pointer to a blank string if the data is not in the HWRPB.
 */

static char *
platform_string(void)
{
	struct dsr_struct *dsr;
	static char unk_system_string[] = "N/A";

	/* Go to the console for the string pointer.
	 * If the rpb_vers is not 5 or greater the rpb
	 * is old and does not have this data in it.
	 */
	if (hwrpb->revision < 5)
		return (unk_system_string);

	/* The Dynamic System Recognition struct
	 * has the system platform name starting
	 * after the character count of the string.
	 */
	dsr = ((struct dsr_struct *)
	       ((char *)hwrpb + hwrpb->dsr_offset));
	return ((char *)dsr + (dsr->sysname_off +
			       sizeof(long)));
}
static int
get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
{
	struct percpu_struct *cpu;
	unsigned long i;
	int count = 0;

	for (i = 0; i < num; i++) {
		cpu = (struct percpu_struct *)
			((char *)cpubase + i*hwrpb->processor_size);
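		/* Presumably: count only the CPUs whose per-CPU HWRPB flags
		   have all bits in the 0x1cc mask set, i.e. those the console
		   marks as present and available. */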
		if ((cpu->flags & 0x1cc) == 0x1cc)
			count++;
	}
	return count;
}
static void
show_cache_size (struct seq_file *f, const char *which, int shape)
{
	if (shape == -1)
		seq_printf (f, "%s\t\t: n/a\n", which);
	else if (shape == 0)
		seq_printf (f, "%s\t\t: unknown\n", which);
	else
		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
			    which, shape >> 10, shape & 15,
			    1 << ((shape >> 4) & 15));
}
static int
show_cpuinfo(struct seq_file *f, void *slot)
{
	extern struct unaligned_stat {
		unsigned long count, va, pc;
	} unaligned[2];

	static char cpu_names[][8] = {
		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
		"EV68CX", "EV7", "EV79", "EV69"
	};

	struct percpu_struct *cpu = slot;
	unsigned int cpu_index;
	char *cpu_name;
	char *systype_name;
	char *sysvariation_name;
	int nr_processors;

	cpu_index = (unsigned) (cpu->type - 1);
	cpu_name = "Unknown";
	if (cpu_index < N(cpu_names))
		cpu_name = cpu_names[cpu_index];

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &systype_name, &sysvariation_name);
	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);

	seq_printf(f, "cpu\t\t\t: Alpha\n"
		   "cpu model\t\t: %s\n"
		   "cpu variation\t\t: %ld\n"
		   "cpu revision\t\t: %ld\n"
		   "cpu serial number\t: %s\n"
		   "system type\t\t: %s\n"
		   "system variation\t: %s\n"
		   "system revision\t\t: %ld\n"
		   "system serial number\t: %s\n"
		   "cycle frequency [Hz]\t: %lu %s\n"
		   "timer frequency [Hz]\t: %lu.%02lu\n"
		   "page size [bytes]\t: %ld\n"
		   "phys. address bits\t: %ld\n"
		   "max. addr. space #\t: %ld\n"
		   "BogoMIPS\t\t: %lu.%02lu\n"
		   "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		   "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		   "platform string\t\t: %s\n"
		   "cpus detected\t\t: %d\n",
		   cpu_name, cpu->variation, cpu->revision,
		   (char*)cpu->serial_no,
		   systype_name, sysvariation_name, hwrpb->sys_revision,
		   (char*)hwrpb->ssn,
		   est_cycle_freq ? : hwrpb->cycle_freq,
		   est_cycle_freq ? "est." : "",
		   hwrpb->intr_freq / 4096,
		   (100 * hwrpb->intr_freq / 4096) % 100,
		   hwrpb->pagesize,
		   hwrpb->pa_bits,
		   hwrpb->max_asn,
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100,
		   unaligned[0].count, unaligned[0].pc, unaligned[0].va,
		   unaligned[1].count, unaligned[1].pc, unaligned[1].va,
		   platform_string(), nr_processors);
	seq_printf(f, "cpus active\t\t: %d\n"
		   "cpu active mask\t\t: %016lx\n",
		   num_online_cpus(), cpus_addr(cpu_possible_map)[0]);

	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);

	return 0;
}
static int
read_mem_block(int *addr, int stride, int size)
{
	long nloads = size / stride, cnt, tmp;

	__asm__ __volatile__(
	/* Next two XORs introduce an explicit data dependency between
	   consecutive loads in the loop, which will give us true load
	   latency. */
	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
	: "r" (stride), "1" (nloads), "2" (addr));
	return cnt / (size / stride);
}
#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)
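/* Worked example, using the decoding in show_cache_size() above:
   CSHAPE(2*1024*1024, 6, 1) == 0x200061 -- a direct-mapped (1-way) 2 MB
   cache with 64-byte (1 << 6) lines, shown as "2048K, 1-way, 64b line". */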
/* ??? EV5 supports up to 64M, but did the systems with more than
   16M of BCACHE ever exist? */
#define MAX_BCACHE_SIZE	16*1024*1024

/* Note that the offchip caches are direct mapped on all Alphas. */
static int
external_cache_probe(int minsize, int width)
{
	int cycles, prev_cycles = 1000000;
	int stride = 1 << width;
	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;

	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
		maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);

	/* Get the first block cached. */
	read_mem_block(__va(0), stride, size);
	while (size < maxsize) {
		/* Get an average load latency in cycles. */
		cycles = read_mem_block(__va(0), stride, size);
		if (cycles > prev_cycles * 2) {
			/* Fine, we exceed the cache. */
			printk("%ldK Bcache detected; load hit latency %d "
			       "cycles, load miss latency %d cycles\n",
			       size >> 11, prev_cycles, cycles);
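			/* Latency only jumped once 'size' stopped fitting, so
			   the cache is the previous, half-size block -- hence
			   size >> 1 below, and size >> 11 for the KB figure
			   printed above. */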
			return CSHAPE(size >> 1, width, 1);
		}
		/* Try to get the next block cached. */
		read_mem_block(__va(size), stride, size);
		prev_cycles = cycles;
		size <<= 1;
	}
	return -1; /* No BCACHE found. */
}
static void __init
determine_cpu_caches (unsigned int cpu_type)
{
	int L1I, L1D, L2, L3;

	if (cpu_type == EV4_CPU)
		L1I = CSHAPE(8*1024, 5, 1);
	else
		L1I = CSHAPE(16*1024, 5, 1);
	/* BIU_CTL is a write-only Abox register. PALcode has a
	   shadow copy, and may be available from some versions
	   of the CSERVE PALcall. If we can get it, then

		unsigned long biu_ctl, size;
		size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
		L2 = CSHAPE (size, 5, 1);

	   Unfortunately, we can't rely on that.
	*/
	L2 = external_cache_probe(128*1024, 5);
	unsigned long car, size;

	L1I = L1D = CSHAPE(8*1024, 5, 1);

	car = *(vuip) phys_to_virt (0x120000078UL);
	size = 64*1024 * (1 << ((car >> 5) & 7));
	/* No typo -- 8 byte cacheline size. Whodathunk. */
	L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
	unsigned long sc_ctl, width;

	L1I = L1D = CSHAPE(8*1024, 5, 1);

	/* Check the line size of the Scache. */
	sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
	width = sc_ctl & 0x1000 ? 6 : 5;
	L2 = CSHAPE (96*1024, width, 3);

	/* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
	   has a shadow copy, and may be available from some versions
	   of the CSERVE PALcall. If we can get it, then

		unsigned long bc_control, bc_config, size;
		size = 1024*1024 * (1 << ((bc_config & 7) - 1));
		L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);

	   Unfortunately, we can't rely on that.
	*/
	L3 = external_cache_probe(1024*1024, width);
	unsigned long cbox_config, size;

	if (cpu_type == PCA56_CPU) {
		L1I = CSHAPE(16*1024, 6, 1);
		L1D = CSHAPE(8*1024, 5, 1);
	} else {
		L1I = CSHAPE(32*1024, 6, 2);
		L1D = CSHAPE(16*1024, 5, 1);
	}

	cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
	size = 512*1024 * (1 << ((cbox_config >> 12) & 3));

#if 0
	L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
#else
	L2 = external_cache_probe(512*1024, 6);
#endif
	L1I = L1D = CSHAPE(64*1024, 6, 2);
	L2 = external_cache_probe(1024*1024, 6);

	L1I = L1D = CSHAPE(64*1024, 6, 2);
	L2 = CSHAPE(7*1024*1024/4, 6, 7);
	/* Nothing known about this cpu type. */
	L1I = L1D = L2 = L3 = 0;

	alpha_l1i_cacheshape = L1I;
	alpha_l1d_cacheshape = L1D;
	alpha_l2_cacheshape = L2;
	alpha_l3_cacheshape = L3;
}
/*
 * We show only CPU #0 info.
 */
static void *
c_start(struct seq_file *f, loff_t *pos)
{
	return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
}

static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}

static void
c_stop(struct seq_file *f, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
static int
alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	/* FIXME FIXME FIXME */
	/* If we are using SRM and serial console, just hard halt here. */
	if (alpha_using_srm && srmcons_output)
		machine_halt();

	return NOTIFY_DONE;
}
static __init int add_pcspkr(void)
{
	struct platform_device *pd;
	int ret;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);