/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

#define MM_UNUSED_TARGET 4

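/*
 * Bookkeeping overview: every struct drm_mm_node is linked into
 * mm->ml_entry in address order, and free nodes are additionally linked
 * into mm->fl_entry, the unordered free stack described above.
 * mm->unused_nodes caches up to MM_UNUSED_TARGET spare nodes so that
 * allocations can succeed from atomic context; see drm_mm_pre_get().
 */
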
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Fall back to the cache of pre-allocated nodes. */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/*
 * Pre-allocate nodes (up to MM_UNUSED_TARGET) so that subsequent
 * allocations in atomic context can draw from the cache instead of
 * calling kmalloc(). Returns 0 on success or -ENOMEM on failure.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the lock around the sleeping allocation. */
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			/* Two cached nodes suffice for one aligned allocation. */
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

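/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * drm_mm_pre_get() is called while sleeping is still allowed, so that a
 * later drm_mm_get_block_atomic() under a driver spinlock can fall back
 * to the nodes cached on mm->unused_nodes instead of kmalloc().
 */
#if 0
static struct drm_mm_node *example_atomic_alloc(struct drm_mm *mm,
						spinlock_t *lock,
						unsigned long size)
{
	struct drm_mm_node *hole, *node = NULL;

	/* Refill the node cache while we may still sleep. */
	if (drm_mm_pre_get(mm))
		return NULL;

	spin_lock(lock);
	hole = drm_mm_search_free(mm, size, 0, 0);
	if (hole)
		node = drm_mm_get_block_atomic(hole, size, 0);
	spin_unlock(lock);

	return node;
}
#endif
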
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	/* The child takes over the head of the parent region. */
	list_add_tail(&child->ml_entry, &parent->ml_entry);
	INIT_LIST_HEAD(&child->fl_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
				     unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 0);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		/* Exact fit: take over the whole free block. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 0);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block);

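/*
 * Worked example of the alignment handling above: with parent->start == 6
 * and alignment == 4, tmp is 2, so a throw-away block of alignment - tmp
 * == 2 units is split off first, leaving parent->start == 8 (aligned).
 * The splitoff is released again through drm_mm_put_block() once the
 * real allocation has been carved out, so no space is leaked.
 */
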
/*
 * Atomic variant of drm_mm_get_block(): relies on the nodes cached by
 * drm_mm_pre_get() instead of sleeping allocations.
 */
struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
					    unsigned long size,
					    unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 1);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		/* Exact fit: take over the whole free block. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 1);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block_atomic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	/* Try to merge with the previous block if it is free. */
	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* Try to merge with the next block if it is free. */
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: fold next into prev. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				/* unused_nodes is shared with drm_mm_pre_get(). */
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Track the smallest hole that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

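/*
 * A minimal allocation sketch (hypothetical caller, not part of this
 * file): the usual pattern is to pair drm_mm_search_free() with
 * drm_mm_get_block(). Passing best_match == 1 scans the whole free
 * stack for the smallest fitting hole instead of returning first fit.
 */
#if 0
static struct drm_mm_node *example_alloc(struct drm_mm *mm,
					 unsigned long size,
					 unsigned alignment)
{
	struct drm_mm_node *hole;

	hole = drm_mm_search_free(mm, size, alignment, 1);
	if (!hole)
		return NULL;	/* no free region large enough */

	return drm_mm_get_block(hole, size, alignment);
}
#endif
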
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean iff the only node left is the initial free block. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	/* The whole managed range starts out as one free block. */
	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	/* Drain the cache of pre-allocated nodes. */
	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
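
/*
 * Lifecycle sketch (hypothetical driver code, sizes are made up): a
 * manager is initialized over a linear range, a block is taken and
 * returned, and takedown succeeds once everything has been freed back
 * into one region.
 */
#if 0
static void example_lifecycle(void)
{
	struct drm_mm mm;
	struct drm_mm_node *hole, *node;

	if (drm_mm_init(&mm, 0, 1024 * 1024))	/* manage [0, 1 MiB) */
		return;

	hole = drm_mm_search_free(&mm, 4096, 4096, 0);
	if (hole) {
		node = drm_mm_get_block(hole, 4096, 4096);
		if (node)
			drm_mm_put_block(node);	/* merges back into the hole */
	}

	drm_mm_takedown(&mm);	/* all space is free again, so this succeeds */
}
#endif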