arch/powerpc/mm/imalloc.c
/*
 * (c) 2001 PPC 64 Team, IBM Corp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#include "mmu_decl.h"

static DEFINE_MUTEX(imlist_mutex);
struct vm_struct * imlist = NULL;

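/* Find the lowest address range of at least `size` bytes that is still
 * unused in imalloc space (between ioremap_bot and IMALLOC_END).  On
 * success the start address is stored in *im_addr and 0 is returned;
 * 1 is returned if no large enough gap exists.  Caller must hold
 * imlist_mutex.
 */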
static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
{
        unsigned long addr;
        struct vm_struct **p, *tmp;

        addr = ioremap_bot;
        for (p = &imlist; (tmp = *p); p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                if ((unsigned long) tmp->addr >= ioremap_bot)
                        addr = tmp->size + (unsigned long) tmp->addr;
                if (addr >= IMALLOC_END - size)
                        return 1;
        }
        *im_addr = addr;

        return 0;
}

/* Return whether the region described by v_addr and size is a subset
 * of the region described by parent
 */
static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
                        struct vm_struct *parent)
{
        return (int) (v_addr >= (unsigned long) parent->addr &&
                      v_addr < (unsigned long) parent->addr + parent->size &&
                      size < parent->size);
}

/* Return whether the region described by v_addr and size is a superset
 * of the region described by child
 */
static int im_region_is_superset(unsigned long v_addr, unsigned long size,
                struct vm_struct *child)
{
        struct vm_struct parent;

        parent.addr = (void *) v_addr;
        parent.size = size;

        return im_region_is_subset((unsigned long) child->addr, child->size,
                        &parent);
}

/* Return whether the region described by v_addr and size overlaps
 * the region described by vm.  Overlapping regions meet the
 * following conditions:
 * 1) The regions share some part of the address space
 * 2) The regions aren't identical
 * 3) Neither region is a subset of the other
 */
static int im_region_overlaps(unsigned long v_addr, unsigned long size,
                     struct vm_struct *vm)
{
        if (im_region_is_superset(v_addr, size, vm))
                return 0;

        return (v_addr + size > (unsigned long) vm->addr + vm->size &&
                v_addr < (unsigned long) vm->addr + vm->size) ||
               (v_addr < (unsigned long) vm->addr &&
                v_addr + size > (unsigned long) vm->addr);
}

/* Determine imalloc status of region described by v_addr and size.
 * Can return one of the following:
 * IM_REGION_UNUSED   -  Entire region is unallocated in imalloc space.
 * IM_REGION_SUBSET   -  Region is a subset of a region that is already
 *                       allocated in imalloc space.
 *                       vm will be assigned to a ptr to the parent region.
 * IM_REGION_EXISTS   -  Exact region already allocated in imalloc space.
 *                       vm will be assigned to a ptr to the existing imlist
 *                       member.
 * IM_REGION_OVERLAP  -  Region overlaps an allocated region in imalloc space.
 * IM_REGION_SUPERSET -  Region is a superset of a region that is already
 *                       allocated in imalloc space.
 */
static int im_region_status(unsigned long v_addr, unsigned long size,
                    struct vm_struct **vm)
{
        struct vm_struct *tmp;

        for (tmp = imlist; tmp; tmp = tmp->next)
                if (v_addr < (unsigned long) tmp->addr + tmp->size)
                        break;

        *vm = NULL;
        if (tmp) {
                if (im_region_overlaps(v_addr, size, tmp))
                        return IM_REGION_OVERLAP;

                *vm = tmp;
                if (im_region_is_subset(v_addr, size, tmp)) {
                        /* Return with tmp pointing to superset */
                        return IM_REGION_SUBSET;
                } else if (im_region_is_superset(v_addr, size, tmp)) {
                        /* Return with tmp pointing to first subset */
                        return IM_REGION_SUPERSET;
                } else if (v_addr == (unsigned long) tmp->addr &&
                           size == tmp->size) {
                        /* Return with tmp pointing to exact region */
                        return IM_REGION_EXISTS;
                }
        }

        return IM_REGION_UNUSED;
}

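/* Carve the sub-region described by v_addr and size out of parent, which
 * must already be a member of imlist.  Depending on whether the child
 * starts at, ends at, or lies strictly inside the parent, one or two new
 * vm_structs are allocated so the list still covers the whole parent
 * range.  Returns the vm_struct that now describes the child, or NULL if
 * memory allocation fails (in which case parent is left untouched).
 */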
static struct vm_struct * split_im_region(unsigned long v_addr,
                unsigned long size, struct vm_struct *parent)
{
        struct vm_struct *vm1 = NULL;
        struct vm_struct *vm2 = NULL;
        struct vm_struct *new_vm = NULL;

        vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
        if (vm1 == NULL) {
                printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
                return NULL;
        }

        if (v_addr == (unsigned long) parent->addr) {
                /* Use existing parent vm_struct to represent child, allocate
                 * new one for the remainder of parent range
                 */
                vm1->size = parent->size - size;
                vm1->addr = (void *) (v_addr + size);
                vm1->next = parent->next;

                parent->size = size;
                parent->next = vm1;
                new_vm = parent;
        } else if (v_addr + size == (unsigned long) parent->addr +
                        parent->size) {
                /* Allocate new vm_struct to represent child, use existing
                 * parent one for remainder of parent range
                 */
                vm1->size = size;
                vm1->addr = (void *) v_addr;
                vm1->next = parent->next;
                new_vm = vm1;

                parent->size -= size;
                parent->next = vm1;
        } else {
                /* Allocate two new vm_structs for the new child and
                 * uppermost remainder, and use existing parent one for the
                 * lower remainder of parent range
                 */
                vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
                if (vm2 == NULL) {
                        printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
                        kfree(vm1);
                        return NULL;
                }

                vm1->size = size;
                vm1->addr = (void *) v_addr;
                vm1->next = vm2;
                new_vm = vm1;

                vm2->size = ((unsigned long) parent->addr + parent->size) -
                                (v_addr + size);
                vm2->addr = (void *) (v_addr + size);
                vm2->next = parent->next;

                parent->size = v_addr - (unsigned long) parent->addr;
                parent->next = vm1;
        }

        return new_vm;
}

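/* Allocate a new vm_struct describing the currently unused region
 * [req_addr, req_addr + size) and link it into imlist in address order.
 * Returns the new entry, or NULL if the allocation fails.  Caller must
 * hold imlist_mutex.
 */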
static struct vm_struct * __add_new_im_area(unsigned long req_addr,
                                            unsigned long size)
{
        struct vm_struct **p, *tmp, *area;

        for (p = &imlist; (tmp = *p); p = &tmp->next) {
                if (req_addr + size <= (unsigned long) tmp->addr)
                        break;
        }

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        area->flags = 0;
        area->addr = (void *)req_addr;
        area->size = size;
        area->next = *p;
        *p = area;

        return area;
}

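/* Look up, and if necessary create or split, an imlist entry covering the
 * region described by req_addr and size.  The region's current status
 * (one of the IM_REGION_* values) must be among the bits set in criteria;
 * otherwise NULL is returned.  Caller must hold imlist_mutex.
 */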
static struct vm_struct * __im_get_area(unsigned long req_addr,
                                        unsigned long size,
                                        int criteria)
{
        struct vm_struct *tmp;
        int status;

        status = im_region_status(req_addr, size, &tmp);
        if ((criteria & status) == 0) {
                return NULL;
        }

        switch (status) {
        case IM_REGION_UNUSED:
                tmp = __add_new_im_area(req_addr, size);
                break;
        case IM_REGION_SUBSET:
                tmp = split_im_region(req_addr, size, tmp);
                break;
        case IM_REGION_EXISTS:
                /* Return requested region */
                break;
        case IM_REGION_SUPERSET:
                /* Return first existing subset of requested region */
                break;
        default:
                printk(KERN_ERR "%s() unexpected imalloc region status\n",
                                __FUNCTION__);
                tmp = NULL;
        }

        return tmp;
}

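/* Reserve a free region of the given size in imalloc space.  The lowest
 * available address is chosen with get_free_im_addr() and a new imlist
 * entry is created for it.  Returns the new vm_struct, or NULL if no
 * suitable address or memory is available.
 */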
struct vm_struct * im_get_free_area(unsigned long size)
{
        struct vm_struct *area;
        unsigned long addr;

        mutex_lock(&imlist_mutex);
        if (get_free_im_addr(size, &addr)) {
                printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
                                __FUNCTION__, size);
                area = NULL;
                goto next_im_done;
        }

        area = __im_get_area(addr, size, IM_REGION_UNUSED);
        if (area == NULL) {
                printk(KERN_ERR
                       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
                        __FUNCTION__, addr, size);
        }
next_im_done:
        mutex_unlock(&imlist_mutex);
        return area;
}

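/* Reserve the specific region described by v_addr and size, provided its
 * current status matches one of the IM_REGION_* bits in criteria (for
 * example unused, an exact existing entry, or a subset/superset of one).
 * Returns the corresponding imlist entry, or NULL on failure.
 */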
struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
                int criteria)
{
        struct vm_struct *area;

        mutex_lock(&imlist_mutex);
        area = __im_get_area(v_addr, size, criteria);
        mutex_unlock(&imlist_mutex);
        return area;
}

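/* Release a region previously obtained with im_get_free_area() or
 * im_get_area(): unmap its page table entries and remove the matching
 * imlist entry.  addr must be page aligned and must match the start of
 * an existing entry; otherwise an error is logged and nothing is freed.
 */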
void im_free(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((unsigned long) addr & ~PAGE_MASK) {
                printk(KERN_ERR "Trying to %s bad address (%p)\n",
                                __FUNCTION__, addr);
                return;
        }
        mutex_lock(&imlist_mutex);
        for (p = &imlist; (tmp = *p); p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        unmap_vm_area(tmp);
                        kfree(tmp);
                        mutex_unlock(&imlist_mutex);
                        return;
                }
        }
        mutex_unlock(&imlist_mutex);
        printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
                        addr);
}