/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
22 #include "wine/port.h"
29 #include <sys/types.h>
30 #ifdef HAVE_SYS_MMAN_H
40 #include "wine/library.h"
41 #include "wine/list.h"
/* Sorted list of address ranges this process has reserved for its own use
   (managed by the wine_mmap_*_reserved_area functions below). */
50 static struct list reserved_areas = LIST_INIT(reserved_areas);
/* Reserved areas are aligned and sized at 64k boundaries. */
51 static const int granularity_mask = 0xffff; /* reserved areas have 64k granularity */
/* Portability fallbacks: MAP_NORESERVE becomes a no-op flag where the
   platform lacks it, and munmap is stubbed out.
   NOTE(review): the stub is presumably inside a !HAVE_MMAP guard that is
   not visible in this excerpt -- confirm against the full file. */
54 #define MAP_NORESERVE 0
58 static inline int munmap( void *ptr, size_t size ) { return 0; }
62 #if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/*
 * try_mmap_fixed
 *
 * Solaris / old-NetBSD fallback, compiled only when the kernel offers no
 * MAP_TRYFIXED flag.  Returns non-zero iff the mapping was placed exactly
 * at the requested address.
 *
 * NOTE(review): this excerpt elides several lines of the function (the
 * early "return 0" guards, the pid/i/vec declarations and the child-side
 * _exit() calls) -- the comments below describe only what is visible.
 */
63 /***********************************************************************
66 * The purpose of this routine is to emulate the behaviour of
67 * the Linux mmap() routine if a non-NULL address is passed,
68 * but the MAP_FIXED flag is not set. Linux in this case tries
69 * to place the mapping at the specified address, *unless* the
70 * range is already in use. Solaris, however, completely ignores
71 * the address argument in this case.
73 * As Wine code occasionally relies on the Linux behaviour, e.g. to
74 * be able to map non-relocateable PE executables to their proper
75 * start addresses, or to map the DOS memory to 0, this routine
76 * emulates the Linux behaviour by checking whether the desired
77 * address range is still available, and placing the mapping there
78 * using MAP_FIXED if so.
80 static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
81 int fildes, off_t off)
/* 'result' is written by the vfork()'ed child and read afterwards by the
   parent (vfork shares the address space), hence the volatile. */
83 char * volatile result = NULL;
84 int pagesize = getpagesize();
87 /* We only try to map to a fixed address if
88 addr is non-NULL and properly aligned,
89 and MAP_FIXED isn't already specified. */
/* Guards: bail out on a misaligned hint or an explicit MAP_FIXED request
   (presumably via early "return 0" -- the return statements are elided
   from this excerpt). */
93 if ( (uintptr_t)addr & (pagesize-1) )
95 if ( flags & MAP_FIXED )
98 /* We use vfork() to freeze all threads of the
99 current process. This allows us to check without
100 race condition whether the desired memory range is
101 already in use. Note that because vfork() shares
102 the address spaces between parent and child, we
103 can actually perform the mapping in the child. */
105 if ( (pid = vfork()) == -1 )
107 perror("try_mmap_fixed: vfork");
/* --- child side: probe the desired range page by page --- */
115 /* We call mincore() for every page in the desired range.
116 If any of these calls succeeds, the page is already
117 mapped and we must fail. */
118 for ( i = 0; i < len; i += pagesize )
119 if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
122 /* Perform the mapping with MAP_FIXED set. This is safe
123 now, as none of the pages is currently in use. */
124 result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
125 if ( result == addr )
128 if ( result != (void *) -1 ) /* This should never happen ... */
129 munmap( result, len );
134 /* vfork() lets the parent continue only after the child
135 has exited. Furthermore, Wine sets SIGCHLD to SIG_IGN,
136 so we don't need to wait for the child. */
/* Success iff the child managed to map exactly at the requested hint. */
138 return result == addr;
140 #endif /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
143 /***********************************************************************
/*
 * wine_anon_mmap
 *
 * Map 'size' bytes of anonymous memory with the given protection and
 * flags, preferring (but not forcing) the hint address 'start'.
 * NOTE(review): the excerpt elides the MAP_ANON branch and parts of the
 * surrounding #ifdef structure; the /dev/zero path below presumably only
 * compiles on platforms without MAP_ANON -- confirm against full file.
 */
146 * Portable wrapper for anonymous mmaps
148 void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
/* /dev/zero fd opened once and cached for the life of the process. */
151 static int fdzero = -1;
158 if ((fdzero = open( "/dev/zero", O_RDONLY )) == -1)
160 perror( "/dev/zero: open" );
164 #endif /* MAP_ANON */
/* Anonymous mappings are always private to this process. */
167 flags &= ~MAP_SHARED;
170 /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
172 flags |= MAP_PRIVATE;
/* Honour the address hint on platforms where plain mmap() would ignore
   it: either via the in-kernel MAP_TRYFIXED flag or via the
   try_mmap_fixed() emulation above. */
175 if (!(flags & MAP_FIXED))
178 /* If available, this will attempt a fixed mapping in-kernel */
179 flags |= MAP_TRYFIXED;
180 #elif defined(__svr4__) || defined(__NetBSD__)
181 if ( try_mmap_fixed( start, size, prot, flags, fdzero, 0 ) )
/* Fall through to the plain mmap() call. */
185 return mmap( start, size, prot, flags, fdzero, 0 );
194 /***********************************************************************
/*
 * reserve_area
 *
 * Reserve as much of [addr, end) as possible with PROT_NONE pages and
 * record the result in the reserved_areas list.  If the whole range
 * cannot be mapped, it is bisected on a 64k boundary and each half is
 * retried recursively.
 */
197 * Reserve as much memory as possible in the given area.
198 * FIXME: probably needs a different algorithm for Solaris
200 static void reserve_area( void *addr, void *end )
203 size_t size = (char *)end - (char *)addr;
205 struct reserved_area *area;
/* Try to grab the whole range in one shot; PROT_NONE + MAP_NORESERVE
   reserves address space without committing memory. */
207 if ((ptr = wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE )) != (void *)-1)
/* end == NULL means "up to the top of the address space". */
211 if (!end) size--; /* avoid wrap-around */
212 /* try to merge it with the previous one */
213 if ((prev = list_tail( &reserved_areas )))
215 area = LIST_ENTRY( prev, struct reserved_area, entry );
/* New mapping is contiguous with the last recorded area -> extend it
   (the size-update statement is elided from this excerpt). */
216 if (area && (char *)area->base + area->size == (char *)ptr)
222 /* create a new area */
223 if ((area = malloc( sizeof(*area) )))
227 list_add_tail( &reserved_areas, &area->entry );
/* malloc failed: give the pages back rather than leak an untracked
   mapping. */
231 else munmap( ptr, size );
/* Whole-range mmap failed: bisect on a 64k boundary and recurse; stops
   once the pieces reach granularity size. */
233 if (size > granularity_mask + 1)
235 size_t new_size = (size / 2) & ~granularity_mask;
236 reserve_area( addr, (char *)addr + new_size );
237 reserve_area( (char *)addr + new_size, end );
242 /***********************************************************************
/* NOTE(review): the function header itself is elided from this excerpt
   (presumably Wine's address-space initialization, mmap_init).  The
   visible code reserves the 32-bit user address space above 0x80000000,
   carefully skipping a 64k-aligned window around the current stack. */
247 static char * const user_space_limit = (char *)0x80000000;
/* Take the address of a local variable to locate the current stack page
   (the declaration of 'stack' is elided from this excerpt). */
249 char * const stack_ptr = &stack;
251 if (stack_ptr >= user_space_limit)
/* Reserve everything between the limit and one granularity unit below
   the stack... */
253 char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
254 if (base > user_space_limit) reserve_area( user_space_limit, base );
/* ...then everything from one granularity unit above the stack upward
   (NULL end = top of address space). */
255 base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
257 /* Linux heuristic: if the stack top is at c0000000, assume the address space */
258 /* ends there, this avoids a lot of futile allocation attempts */
259 if (base != (char *)0xc0000000)
261 reserve_area( base, 0 );
/* Stack lies below the limit: reserve the entire high range in one go. */
263 else reserve_area( user_space_limit, 0 );
274 /***********************************************************************
275 * wine_mmap_add_reserved_area
277 * Add an address range to the list of reserved areas.
278 * Caller must have made sure the range is not used by anything else.
280 * Note: the reserved areas functions are not reentrant, caller is
281 * responsible for proper locking.
283 void wine_mmap_add_reserved_area( void *addr, size_t size )
285 struct reserved_area *area;
/* If the range reaches the very top of the address space, shave one
   byte so addr + size does not wrap to 0. */
288 if (!((char *)addr + size)) size--; /* avoid wrap-around */
291 /* blow away existing mappings */
292 wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE | MAP_FIXED );
/* The list is kept sorted by base address: walk it to find the
   insertion point, merging with adjacent areas where ranges touch. */
295 LIST_FOR_EACH( ptr, &reserved_areas )
297 area = LIST_ENTRY( ptr, struct reserved_area, entry );
298 if (area->base > addr)
300 /* try to merge with the next one */
/* New range ends exactly where this area begins -> extend this area
   downward (the merge statements are elided from this excerpt). */
301 if ((char *)addr + size == (char *)area->base)
309 else if ((char *)area->base + area->size == (char *)addr)
311 /* merge with the previous one */
314 /* try to merge with the next one too */
315 if ((ptr = list_next( &reserved_areas, ptr )))
317 struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
/* The new range bridges the gap between two areas: absorb the next
   area into this one and drop its list node. */
318 if ((char *)addr + size == (char *)next->base)
320 area->size += next->size;
321 list_remove( &next->entry );
/* No adjacent area found: create a fresh entry before 'ptr' (the
   base/size assignments and the malloc-failure handling are elided
   from this excerpt). */
329 if ((area = malloc( sizeof(*area) )))
333 list_add_before( ptr, &area->entry );
338 /***********************************************************************
339 * wine_mmap_remove_reserved_area
341 * Remove an address range from the list of reserved areas.
342 * If 'unmap' is non-zero the range is unmapped too.
344 * Note: the reserved areas functions are not reentrant, caller is
345 * responsible for proper locking.
347 void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
349 struct reserved_area *area;
/* Same top-of-address-space wrap-around guard as in
   wine_mmap_add_reserved_area. */
352 if (!((char *)addr + size)) size--; /* avoid wrap-around */
354 ptr = list_head( &reserved_areas );
355 /* find the first area covering address */
/* Walk the sorted list and handle each of the four possible overlap
   shapes between [addr, addr+size) and an area.  NOTE(review): the loop
   construct itself is elided from this excerpt. */
358 area = LIST_ENTRY( ptr, struct reserved_area, entry );
359 if ((char *)area->base >= (char *)addr + size) break; /* outside the range */
360 if ((char *)area->base + area->size > (char *)addr) /* overlaps range */
362 if (area->base >= addr)
364 if ((char *)area->base + area->size > (char *)addr + size)
366 /* range overlaps beginning of area only -> shrink area */
367 if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
368 area->size -= (char *)addr + size - (char *)area->base;
369 area->base = (char *)addr + size;
374 /* range contains the whole area -> remove area completely */
/* Advance the cursor first: list_remove invalidates 'ptr'. */
375 ptr = list_next( &reserved_areas, ptr );
376 if (unmap) munmap( area->base, area->size );
377 list_remove( &area->entry );
/* area->base < addr: the removed range starts inside this area. */
384 if ((char *)area->base + area->size > (char *)addr + size)
386 /* range is in the middle of area -> split area in two */
/* NOTE(review): the malloc-failure branch for new_area is elided from
   this excerpt; only the success path is visible. */
387 struct reserved_area *new_area = malloc( sizeof(*new_area) );
390 new_area->base = (char *)addr + size;
391 new_area->size = (char *)area->base + area->size - (char *)new_area->base;
392 list_add_after( ptr, &new_area->entry );
/* Otherwise clamp the unmap size to what the area actually covers. */
394 else size = (char *)area->base + area->size - (char *)addr;
395 area->size = (char *)addr - (char *)area->base;
396 if (unmap) munmap( addr, size );
401 /* range overlaps end of area only -> shrink area */
402 if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
403 area->size = (char *)addr - (char *)area->base;
407 ptr = list_next( &reserved_areas, ptr );
412 /***********************************************************************
413 * wine_mmap_is_in_reserved_area
415 * Check if the specified range is included in a reserved area.
416 * Returns 1 if range is fully included, 0 if range is not included
417 * at all, and -1 if it is only partially included.
419 * Note: the reserved areas functions are not reentrant, caller is
420 * responsible for proper locking.
422 int wine_mmap_is_in_reserved_area( void *addr, size_t size )
424 struct reserved_area *area;
427 LIST_FOR_EACH( ptr, &reserved_areas )
429 area = LIST_ENTRY( ptr, struct reserved_area, entry );
430 if (area->base > addr) break;
431 if ((char *)area->base + area->size <= (char *)addr) continue;
432 /* area must contain block completely */
433 if ((char *)area->base + area->size < (char *)addr + size) return -1;