/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
#ifdef HAVE_MMAP

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *           try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );
        _exit(1);
    }

    /* reap the child we forked to probe the address range */
    do
    {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}
#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
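/* Illustrative sketch, not part of the original file: a caller that only
 * needs an anonymous scratch page and does not care where it lands could
 * use the wrapper like this (the variable name is hypothetical):
 *
 *     void *scratch = wine_anon_mmap( NULL, getpagesize(), PROT_READ | PROT_WRITE, 0 );
 *     if (scratch != (void *)-1) munmap( scratch, getpagesize() );
 */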
/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}
/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = getpagesize();
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}
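/* Illustrative worked example, not part of the original file: reserving
 * 0x00110000-0x40000000 in one mmap_reserve() call covers 0x3fef0000 bytes.
 * If that single reservation fails, the range is split on a 64k boundary
 * ((0x3fef0000 / 2) & ~0xffff == 0x1ff70000) into 0x00110000-0x20080000 and
 * 0x20080000-0x40000000, and each half is retried recursively until the
 * pieces either map successfully or shrink below the 64k granularity. */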
/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}
/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t page_size = getpagesize();
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)page_size)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}
/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}
/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }
    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
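/* Illustrative usage, not part of the original file: a caller that has just
 * reserved a range itself records it so it gets merged into the sorted list
 * (the variable names below are hypothetical):
 *
 *     if (wine_anon_mmap( wanted_base, wanted_size, PROT_NONE, MAP_NORESERVE ) == wanted_base)
 *         wine_mmap_add_reserved_area( wanted_base, wanted_size );
 */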
/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
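/* Illustrative usage, not part of the original file: giving a reserved range
 * back to the system; a non-zero third argument also munmap()s the range:
 *
 *     wine_mmap_remove_reserved_area( base, size, 1 );
 */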
/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
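/* Illustrative usage, not part of the original file, showing the three
 * possible return values:
 *
 *     switch (wine_mmap_is_in_reserved_area( base, size ))
 *     {
 *     case  1: break;   // fully inside a reserved area
 *     case  0: break;   // not reserved at all
 *     case -1: break;   // straddles a reservation boundary
 *     }
 */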
/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
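/* Illustrative usage, not part of the original file: dumping all reserved
 * ranges in ascending address order with a hypothetical callback:
 *
 *     static int dump_area( void *base, size_t size, void *arg )
 *     {
 *         fprintf( stderr, "reserved %p-%p\n", base, (char *)base + size );
 *         return 0;   // keep enumerating
 *     }
 *     ...
 *     wine_mmap_enum_reserved_areas( dump_area, NULL, 0 );
 */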
#else  /* HAVE_MMAP */