/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * -  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *    Alex Achenbach <xela@slit.de>, December 2002.
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/bootsetup.h>
#include <asm/sections.h>

/*
 * PFN of last memory page.
 */
unsigned long end_pfn;
EXPORT_SYMBOL(end_pfn);

/*
 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
 * The direct mapping extends to end_pfn_map, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long end_pfn_map;

/*
 * Last pfn which the user wants to use.
 */
unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;

extern struct resource code_resource, data_resource;

/* Check for some hardcoded bad areas that early boot is not allowed to touch */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp, last = addr + size;

	/* various gunk below that needed for SMP startup */
	if (addr < 0x8000) {
		*addrp = 0x8000;
		return 1;
	}
	/* direct mapping tables of the kernel */
	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
		*addrp = table_end << PAGE_SHIFT;
		return 1;
	}
	/* initrd */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
	    addr < INITRD_START+INITRD_SIZE) {
		*addrp = INITRD_START + INITRD_SIZE;
		return 1;
	}
#endif
	/* kernel code + 640k memory hole (later should not be needed, but
	   be paranoid for now) */
	if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
		*addrp = __pa_symbol(&_end);
		return 1;
	}
	if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
		*addrp = ebda_addr + ebda_size;
		return 1;
	}
	/* XXX ramdisk image here? */
	return 0;
}

/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}

/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		if (type && ei->type != type)
			continue;
		/* is the region (part) in overlap with the current region? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		/* if the region is at the beginning of <start,end> we move
		   start to the end of the region since it's ok until there */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/* if start is now at or beyond end, we're done, full coverage */
		if (start >= end)
			return 1;	/* we're done */
	}
	return 0;
}

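/*
 * For example, given a hypothetical map with RAM entries at [0, 0xa0000)
 * and [0x100000, 0x80000000): e820_any_mapped(0x90000, 0x100000, E820_RAM)
 * returns 1 because part of the range (0x90000-0xa0000) lies in a RAM entry,
 * while e820_all_mapped(0x90000, 0x100000, E820_RAM) returns 0 because the
 * 0xa0000-0x100000 hole is not covered by any RAM entry.
 */
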
/*
 * Find a free area in a specific range.
 */
unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long addr = ei->addr, last;
		if (ei->type != E820_RAM)
			continue;
		if (addr < start)
			addr = start;
		if (addr > ei->addr + ei->size)
			continue;
		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
			;
		last = addr + size;
		if (last > ei->addr + ei->size)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1UL;
}

/*
 * Free bootmem based on the e820 table for a node.
 */
void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start, unsigned long end)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;

		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;
		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;
		if (last > addr && last-addr >= PAGE_SIZE)
			free_bootmem_node(pgdat, addr, last-addr);
	}
}

/*
 * Find the highest page frame number we have available
 */
unsigned long __init e820_end_of_ram(void)
{
	int i;
	unsigned long end_pfn = 0;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start, end;

		start = round_up(ei->addr, PAGE_SIZE);
		end = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (start >= end)
			continue;
		if (ei->type == E820_RAM) {
			if (end > end_pfn<<PAGE_SHIFT)
				end_pfn = end>>PAGE_SHIFT;
		} else {
			if (end > end_pfn_map<<PAGE_SHIFT)
				end_pfn_map = end>>PAGE_SHIFT;
		}
	}

	if (end_pfn > end_pfn_map)
		end_pfn_map = end_pfn;
	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
		end_pfn_map = MAXMEM>>PAGE_SHIFT;
	if (end_pfn > end_user_pfn)
		end_pfn = end_user_pfn;
	if (end_pfn > end_pfn_map)
		end_pfn = end_pfn_map;

	return end_pfn;
}

/*
 * Compute how much memory is missing in a range.
 * Unlike the other functions in this file the arguments are in page numbers.
 */
unsigned long __init
e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long ram = 0;
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end = end_pfn << PAGE_SHIFT;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long last, addr;

		if (ei->type != E820_RAM ||
		    ei->addr+ei->size <= start ||
		    ei->addr >= end)
			continue;
		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start)
			addr = start;
		last = round_down(ei->addr + ei->size, PAGE_SIZE);
		if (last >= end)
			last = end;
		if (last > addr)
			ram += last - addr;
	}
	return ((end - start) - ram) >> PAGE_SHIFT;
}

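/*
 * For example, assuming 4k pages and a map whose only RAM below 1MB is
 * [0, 0xa0000): e820_hole_size(0, 0x100) finds 0xa0000 bytes of RAM in the
 * first 0x100 pages, so it reports (0x100000 - 0xa0000) >> 12 = 0x60 pages
 * of hole (the legacy 640k-1M window).
 */
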
/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
void __init e820_reserve_resources(void)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
#ifdef CONFIG_KEXEC
			request_resource(res, &crashk_res);
#endif
		}
	}
}

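/*
 * The resource names chosen above are what later show up in /proc/iomem;
 * an illustrative excerpt (addresses made up) could look like:
 *
 *	00000000-0009ffff : System RAM
 *	000f0000-000fffff : reserved
 *	00100000-7fffffff : System RAM
 *	  00200000-004fffff : Kernel code
 *	  00500000-006fffff : Kernel data
 */
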
/* Add a memory region to the kernel e820 map. */
void __init add_memory_region(unsigned long start, unsigned long size, int type)
{
	int x = e820.nr_map;
	if (x == E820MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}
	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}

void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(" %s: %016Lx - %016Lx ", who,
			(unsigned long long) e820.map[i].addr,
			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:	printk("(usable)\n"); break;
		case E820_RESERVED: printk("(reserved)\n"); break;
		case E820_ACPI:	printk("(ACPI data)\n"); break;
		case E820_NVS:	printk("(ACPI NVS)\n"); break;
		default:	printk("type %u\n", e820.map[i].type); break;
		}
	}
}

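/*
 * With who == "BIOS-e820" this produces the familiar boot lines, e.g.
 * (illustrative values):
 *
 *	 BIOS-e820: 0000000000000000 - 000000000009fc00 (usable)
 *	 BIOS-e820: 000000000009fc00 - 00000000000a0000 (reserved)
 *	 BIOS-e820: 0000000000100000 - 000000007fff0000 (usable)
 *	 BIOS-e820: 000000007fff0000 - 0000000080000000 (ACPI data)
 */
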
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps.
 */
static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820MAX] __initdata;
	static struct change_member *change_point[2*E820MAX] __initdata;
	static struct e820entry *overlap_list[E820MAX] __initdata;
	static struct e820entry new_bios[E820MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
	 * Visually we're performing the following (1,2,3,4 = memory types)...
	 *
	 * Sample memory map (w/overlaps):
	 *	____22__________________
	 *	______________________4_
	 *	____1111________________
	 *	_44_____________________
	 *	11111111________________
	 *	____________________33__
	 *	___________44___________
	 *	__________33333_________
	 *	______________22________
	 *	___________________2222_
	 *	_________111111111______
	 *	_____________________11_
	 *	_________________4______
	 *
	 * Sanitized equivalent (no overlap):
	 *	1_______________________
	 *	_44_____________________
	 *	___1____________________
	 *	____22__________________
	 *	______11________________
	 *	_________1______________
	 *	__________3_____________
	 *	___________44___________
	 *	_____________33_________
	 *	_______________2________
	 *	________________1_______
	 *	_________________4______
	 *	___________________2____
	 *	____________________33__
	 *	______________________4_
	 */

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i=0; i<old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i=0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i=0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i=1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
			    ((change_point[i]->addr == change_point[i-1]->addr) &&
			     (change_point[i]->addr == change_point[i]->pbios->addr) &&
			     (change_point[i-1]->addr != change_point[i-1]->pbios->addr))) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new bios map */
	for (chgidx=0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pbios;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i=0; i<overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i=0; i<overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= E820MAX)
						break;	/* no more space left for new bios entries */
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;	/* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}

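/*
 * As a small worked example (hypothetical input): two overlapping BIOS
 * entries, RAM covering [0, 0x80000000) and reserved covering
 * [0x7ff00000, 0x80000000), generate change points at 0, 0x7ff00000 and
 * 0x80000000.  Because the larger type wins inside the overlap, the
 * sanitized map comes out as RAM [0, 0x7ff00000) followed by
 * reserved [0x7ff00000, 0x80000000).
 */
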
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long start = biosmap->addr;
		unsigned long size = biosmap->size;
		unsigned long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 *
		 * This should be removed on Hammer which is supposed to not
		 * have non e820 covered ISA mappings there, but I had some strange
		 * problems so it stays for now.  -AK
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				if (end <= 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}

		add_memory_region(start, size, type);
	} while (biosmap++, --nr_map);
	return 0;
}

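/*
 * For instance, a hypothetical BIOS entry claiming RAM for the whole of
 * [0, 0x2000000) is split by the fixup above into RAM [0, 0xa0000) plus
 * RAM [0x100000, 0x2000000); the 640k-1M window is dropped entirely.
 */
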
void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}

void __init parse_memopt(char *p, char **from)
{
	end_user_pfn = memparse(p, from);
	end_user_pfn >>= PAGE_SHIFT;
}

void __init parse_memmapopt(char *p, char **from)
{
	unsigned long long start_at, mem_size;

	mem_size = memparse(p, from);
	p = *from;
	if (*p == '@') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, from);
		add_memory_region(start_at, mem_size, E820_RESERVED);
	} else {
		end_user_pfn = (mem_size >> PAGE_SHIFT);
	}
	p = *from;
}

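/*
 * Illustrative command lines (memparse() understands the usual K/M/G
 * suffixes):
 *
 *	memmap=64M@1G	adds RAM      [0x40000000, 0x44000000)
 *	memmap=64M#1G	adds ACPI     [0x40000000, 0x44000000)
 *	memmap=64M$1G	adds reserved [0x40000000, 0x44000000)
 *	memmap=512M	(no @/#/$) just limits end_user_pfn, like mem=512M
 */
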
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);

/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space.  We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS left enough space for us.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	unsigned long last;
	int i;
	int found = 0;

	last = 0x100000000ull;
	gapstart = 0x10000000;
	gapsize = 0x400000;
	i = e820.nr_map;
	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap > gapsize) {
				gapsize = gap;
				gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}

	if (!found) {
		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
	}

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;
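	/*
	 * e.g. for a hypothetical 256MB gap: gapsize >> 4 is 16MB, so round
	 * doubles from 1MB up to 16MB, and a gapstart of 0xc0000000 gives
	 * pci_mem_start = (0xc0000000 + 0x1000000) & ~0xffffff = 0xc1000000.
	 */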
	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
		pci_mem_start, gapstart, gapsize);
}