/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */

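/* Each heap is a circular, doubly linked list of mem_block nodes
 * describing [start, start+size) ranges; a block's 'filp' is NULL
 * while free and points at the owning file once allocated.  The heap
 * node itself is a sentinel carrying an impossible filp value, so
 * coalescing never runs past the ends of the managed range (see
 * init_heap() and free_block() below).
 */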
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_tex_region_t *list;
        unsigned shift, nr;
        unsigned start, end, i;
        int age;

        shift = dev_priv->tex_lru_log_granularity;
        nr = I915_NR_TEX_REGIONS;

        /* Convert the block's byte range into texture-region indices. */
        start = p->start >> shift;
        end = (p->start + p->size - 1) >> shift;

        age = ++sarea_priv->texAge;
        list = sarea_priv->texList;

        /* Mark the regions with the new flag and update their age.  Move
         * them to head of list to preserve LRU semantics.
         */
        for (i = start; i <= end; i++) {
                list[i].in_use = in_use;
                list[i].age = age;

                /* remove_from_list(i)
                 */
                list[(unsigned)list[i].next].prev = list[i].prev;
                list[(unsigned)list[i].prev].next = list[i].next;

                /* insert_at_head(list, i)
                 */
                list[i].prev = nr;
                list[i].next = list[nr].next;
                list[(unsigned)list[nr].next].prev = i;
                list[nr].next = i;
        }
}

/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */

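/* Carve an allocation of 'size' bytes at 'start' out of free block 'p',
 * splitting off new free blocks for any leftover space before and after
 * the requested range, and hand the resulting block to 'filp'.
 */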
static struct mem_block *split_block(struct mem_block *p, int start, int size,
                                     DRMFILE filp)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock =
                    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock =
                    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

      out:
        /* Our block is in the middle */
        p->filp = filp;
        return p;
}

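/* First-fit search of the free list.  'align2' is the log2 of the
 * required alignment, so the candidate start offset is rounded up with
 * a simple mask before checking whether the request still fits.
 */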
static struct mem_block *alloc_block(struct mem_block *heap, int size,
                                     int align2, DRMFILE filp)
{
        struct mem_block *p;
        int mask = (1 << align2) - 1;

        for (p = heap->next; p != heap; p = p->next) {
                int start = (p->start + mask) & ~mask;
                if (p->filp == NULL && start + size <= p->start + p->size)
                        return split_block(p, start, size, filp);
        }

        return NULL;
}

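/* Linear lookup of the block whose start offset exactly matches 'start'.
 */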
static struct mem_block *find_block(struct mem_block *heap, int start)
{
        struct mem_block *p;

        for (p = heap->next; p != heap; p = p->next)
                if (p->start == start)
                        return p;

        return NULL;
}

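/* Return a block to the free list, merging it with free neighbours so
 * the list never holds two adjacent free blocks.
 */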
static void free_block(struct mem_block *p)
{
        p->filp = NULL;

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->filp == NULL) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
        }

        if (p->prev->filp == NULL) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
        }
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
        struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

        if (!blocks)
                return DRM_ERR(ENOMEM);

        *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
        if (!*heap) {
                drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
                return DRM_ERR(ENOMEM);
        }

        /* One free block covering the whole range... */
        blocks->start = start;
        blocks->size = size;
        blocks->filp = NULL;
        blocks->next = blocks->prev = *heap;

        /* ...and a sentinel heap node that can never look free. */
        memset(*heap, 0, sizeof(**heap));
        (*heap)->filp = (DRMFILE) - 1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}

/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        for (p = heap->next; p != heap; p = p->next) {
                if (p->filp == filp) {
                        p->filp = NULL;
                        mark_block(dev, p, 0);
                }
        }

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        for (p = heap->next; p != heap; p = p->next) {
                while (p->filp == NULL && p->next->filp == NULL) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
                }
        }
}

/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap;) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
        }

        drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
        *heap = NULL;
}

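/* Map a userspace region ID onto the corresponding heap pointer; only
 * the AGP region is managed here.
 */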
static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
{
        switch (region) {
        case I915_MEM_REGION_AGP:
                return &dev_priv->agp_heap;
        default:
                return NULL;
        }
}

/* IOCTL HANDLERS */

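/* Allocate a block for the calling file, mark its texture regions as
 * in use, and copy the resulting offset back to userspace through
 * alloc.region_offset.
 */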
int i915_mem_alloc(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_alloc_t alloc;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
                                 sizeof(alloc));

        heap = get_heap(dev_priv, alloc.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        /* Make things easier on ourselves: all allocations at least
         * 4k aligned.
         */
        if (alloc.alignment < 12)
                alloc.alignment = 12;

        block = alloc_block(*heap, alloc.size, alloc.alignment, filp);

        if (!block)
                return DRM_ERR(ENOMEM);

        mark_block(dev, block, 1);

        if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

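/* Free a previously allocated block.  The block is looked up by its
 * start offset and must belong to the calling file.
 */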
int i915_mem_free(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_free_t memfree;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
                                 sizeof(memfree));

        heap = get_heap(dev_priv, memfree.region);
        if (!heap || !*heap)
                return DRM_ERR(EFAULT);

        block = find_block(*heap, memfree.region_offset);
        if (!block)
                return DRM_ERR(EFAULT);

        if (block->filp != filp)
                return DRM_ERR(EPERM);

        mark_block(dev, block, 0);
        free_block(block);
        return 0;
}

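/* Create the heap for a region.  The managed range is described by
 * userspace and must not already have been initialized.
 */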
int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_init_heap_t initheap;
        struct mem_block **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(initheap,
                                 (drm_i915_mem_init_heap_t __user *) data,
                                 sizeof(initheap));

        heap = get_heap(dev_priv, initheap.region);
        if (!heap)
                return DRM_ERR(EFAULT);

        if (*heap) {
                DRM_ERROR("heap already initialized?");
                return DRM_ERR(EFAULT);
        }

        return init_heap(heap, initheap.start, initheap.size);
}

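/* Destroy a region's heap, releasing every block descriptor that
 * belongs to it.
 */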
int i915_mem_destroy_heap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_mem_destroy_heap_t destroyheap;
        struct mem_block **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(destroyheap,
                                 (drm_i915_mem_destroy_heap_t __user *) data,
                                 sizeof(destroyheap));

        heap = get_heap(dev_priv, destroyheap.region);
        if (!heap) {
                DRM_ERROR("get_heap failed");
                return DRM_ERR(EFAULT);
        }

        if (!*heap) {
                DRM_ERROR("heap not initialized?");
                return DRM_ERR(EFAULT);
        }

        i915_mem_takedown(heap);
        return 0;
}