/*
 * Procedures for interfacing to Open Firmware.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>		/* _ALIGN_DOWN() */
#include <asm/prom.h>
#include <asm/lmb.h>		/* struct lmb, MAX_LMB_REGIONS */
#include <asm/abs_addr.h>

struct lmb lmb;

#undef DEBUG
void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	udbg_printf("lmb_dump_all:\n");
	udbg_printf("    memory.cnt           = 0x%lx\n",
		    lmb.memory.cnt);
	udbg_printf("    memory.size          = 0x%lx\n",
		    lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		udbg_printf("    memory.region[0x%lx].base = 0x%lx\n",
			    i, lmb.memory.region[i].base);
		udbg_printf("                 .size = 0x%lx\n",
			    lmb.memory.region[i].size);
	}

	udbg_printf("\n    reserved.cnt         = 0x%lx\n",
		    lmb.reserved.cnt);
	udbg_printf("    reserved.size        = 0x%lx\n",
		    lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		udbg_printf("    reserved.region[0x%lx].base = 0x%lx\n",
			    i, lmb.reserved.region[i].base);
		udbg_printf("                   .size = 0x%lx\n",
			    lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}
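/*
 * Two half-open ranges [base1, base1+size1) and [base2, base2+size2)
 * overlap iff each one starts below the other's end.  For example,
 * (base 0x1000, size 0x1000) and (base 0x1800, size 0x1000) overlap,
 * while (base 0x1000, size 0x1000) and (base 0x2000, size 0x1000)
 * merely touch and do not.
 */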
static unsigned long __init
lmb_addrs_overlap(unsigned long base1, unsigned long size1,
		  unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
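/*
 * Returns 1 if block 2 immediately follows block 1, -1 if block 1
 * immediately follows block 2, and 0 if the two are not adjacent.
 */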
static long __init
lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		   unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
static long __init
lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
	unsigned long base1 = rgn->region[r1].base;
	unsigned long size1 = rgn->region[r1].size;
	unsigned long base2 = rgn->region[r2].base;
	unsigned long size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init
lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
{
	unsigned long i;

	/* Grow region 1 to cover region 2, then slide the tail of the
	 * table down over the now-dead entry. */
	rgn->region[r1].size += rgn->region[r2].size;
	for (i = r2; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i+1].base;
		rgn->region[i].size = rgn->region[i+1].size;
	}
	rgn->cnt--;
}
/* This routine called with relocation disabled. */
void __init
lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
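/*
 * A rough sketch of the expected boot-time call sequence; the actual
 * call sites live in the platform's early setup code, not in this file:
 *
 *	lmb_init();			// install the dummy entries above
 *	lmb_add(base, size);		// once per memory range reported
 *					// by Open Firmware
 *	lmb_analyze();			// compute lmb.memory.size
 *	lmb_reserve(base, size);	// kernel text, initrd, etc.
 *	p = lmb_alloc(size, align);	// early permanent allocations
 */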
/* This routine called with relocation disabled. */
void __init
lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}
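/*
 * Insert (base, size) into a region table that is kept sorted by base
 * address.  The new block is merged into an existing entry whenever it
 * is exactly adjacent to one.  Returns a positive count of merges
 * performed, 0 for a plain insertion, or -1 if the fixed-size table is
 * already full.
 */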
/* This routine called with relocation disabled. */
static long __init
lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
	unsigned long coalesced = 0;
	long adjacent, i;	/* i must be signed for the i >= 0 loop below */

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* New block immediately precedes region i. */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* New block immediately follows region i. */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	/* The grown region may now abut its successor; merge them too. */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
		lmb_coalesce_regions(rgn, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	else if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table.
	 * The dummy base-0 entry from lmb_init() guarantees the else
	 * branch fires before i can go negative. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i+1].base = rgn->region[i].base;
			rgn->region[i+1].size = rgn->region[i].size;
		} else {
			rgn->region[i+1].base = base;
			rgn->region[i+1].size = size;
			break;
		}
	}
	rgn->cnt++;

	return 0;
}
/* This routine called with relocation disabled. */
long __init
lmb_add(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}
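/*
 * Mark (base, size) as reserved so lmb_alloc() will never hand any
 * part of it out.
 */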
long __init
lmb_reserve(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	return lmb_add_region(_rgn, base, size);
}
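/*
 * Returns the index of the first region in rgn that overlaps
 * [base, base+size), or -1 if nothing does.
 */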
long __init
lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}
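/* Convenience wrapper: allocate with no upper address bound. */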
unsigned long __init
lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}
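/*
 * Allocate top-down: walk the memory regions from highest to lowest,
 * start at the aligned top of each region (clipped to max_addr), and
 * step downwards past any reserved region that overlaps the candidate
 * range.  The winning range is recorded in lmb.reserved so later
 * allocations avoid it.  Returns 0 on failure, so address 0 is
 * deliberately never handed out.
 */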
unsigned long __init
lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr)
			base = _ALIGN_DOWN(min(lmbbase + lmbsize, max_addr) - size,
					   align);
		else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
					   align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}
/* You must call lmb_analyze() before this. */
unsigned long __init
lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}
unsigned long __init
lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
/*
 * Truncate the lmb list to memory_limit if it's set.
 * You must call lmb_analyze() after this.
 */
void __init lmb_enforce_memory_limit(void)
{
	extern unsigned long memory_limit;
	unsigned long i, limit;

	if (!memory_limit)
		return;

	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		/* This region crosses the limit: clip it, drop the rest. */
		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}
}