/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <asm/io.h>


struct resource ioport_resource = {
        .name   = "PCI IO",
        .start  = 0,
        .end    = IO_SPACE_LIMIT,
        .flags  = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name   = "PCI mem",
        .start  = 0,
        .end    = -1,
        .flags  = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);
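
/*
 * Illustration (made-up addresses and names): every I/O port or memory
 * range in the system is described by a struct resource hanging below one
 * of the two roots above, linked through ->parent, ->child and ->sibling
 * and kept sorted by address.  A fragment of the tree for a hypothetical
 * device with one I/O and one memory BAR could look like:
 *
 *      ioport_resource ("PCI IO",  0x0000 - IO_SPACE_LIMIT)
 *        `-- 0xd000-0xd01f : "foo I/O"
 *      iomem_resource  ("PCI mem", 0x0 - ~0)
 *        `-- 0xfebf0000-0xfebf0fff : "foo MMIO"
 */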

static DEFINE_RWLOCK(resource_lock);

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct resource *p = v;
        (*pos)++;
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
{
        struct resource *p = m->private;
        loff_t l = 0;
        read_lock(&resource_lock);
        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
                ;
        return p;
}

static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
{
        read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
        struct resource *root = m->private;
        struct resource *r = v, *p;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;

        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;
        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                        depth * 2, "",
                        width, (unsigned long long) r->start,
                        width, (unsigned long long) r->end,
                        r->name ? r->name : "<BAD>");
        return 0;
}
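
/*
 * Illustrative output format: r_show() prints one line per resource, the
 * hex start/end padded to `width' digits and indented two spaces per
 * nesting level.  For the iomem tree (exposed as /proc/iomem below) this
 * looks roughly like the following (example entries, not from a real
 * machine):
 *
 *      00000000-0009fbff : System RAM
 *      000a0000-000bffff : Video RAM area
 *      e0000000-e7ffffff : PCI Bus #01
 *        e0000000-e0ffffff : 0000:01:00.0
 */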

static const struct seq_operations resource_op = {
        .start  = r_start,
        .next   = r_next,
        .stop   = r_stop,
        .show   = r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &ioport_resource;
        }
        return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &resource_op);
        if (!res) {
                struct seq_file *m = file->private_data;
                m->private = &iomem_resource;
        }
        return res;
}

static const struct file_operations proc_ioports_operations = {
        .open           = ioports_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static const struct file_operations proc_iomem_operations = {
        .open           = iomem_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init ioresources_init(void)
{
        proc_create("ioports", 0, NULL, &proc_ioports_operations);
        proc_create("iomem", 0, NULL, &proc_iomem_operations);
        return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}

static int __release_resource(struct resource *old)
{
        struct resource *tmp, **p;

        p = &old->parent->child;
        for (;;) {
                tmp = *p;
                if (!tmp)
                        break;
                if (tmp == old) {
                        *p = tmp->sibling;
                        old->parent = NULL;
                        return 0;
                }
                p = &tmp->sibling;
        }
        return -EINVAL;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __request_resource(root, new);
        write_unlock(&resource_lock);
        return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old);
        write_unlock(&resource_lock);
        return retval;
}

EXPORT_SYMBOL(release_resource);
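
/*
 * Example (illustrative, not taken from a real driver): claiming a fixed
 * MMIO window directly under iomem_resource and releasing it again.  The
 * name and addresses are made up; the descriptor must stay valid for as
 * long as it is in the tree.  request_resource() returns -EBUSY when the
 * range overlaps an existing resource:
 *
 *      static struct resource foo_res = {
 *              .name   = "foo ctrl regs",
 *              .start  = 0xfeb00000,
 *              .end    = 0xfeb00fff,
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      if (request_resource(&iomem_resource, &foo_res))
 *              return -EBUSY;
 *      ...
 *      release_resource(&foo_res);
 */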

#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end and res->flags.
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res)
{
        resource_size_t start, end;
        struct resource *p;

        BUG_ON(!res);

        start = res->start;
        end = res->end;
        BUG_ON(start >= end);

        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
                /* system ram is just marked as IORESOURCE_MEM */
                if (p->flags != res->flags)
                        continue;
                if (p->start > end) {
                        p = NULL;
                        break;
                }
                if ((p->end >= start) && (p->start < end))
                        break;
        }
        read_unlock(&resource_lock);
        if (!p)
                return -1;
        /* copy data */
        if (res->start < p->start)
                res->start = p->start;
        if (res->end > p->end)
                res->end = p->end;
        return 0;
}
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        struct resource res;
        unsigned long pfn, len;
        u64 orig_end;
        int ret = -1;
        res.start = (u64) start_pfn << PAGE_SHIFT;
        res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        orig_end = res.end;
        while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
                pfn = (unsigned long)(res.start >> PAGE_SHIFT);
                len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
                if (ret)
                        break;
                res.start = res.end + 1;
                res.end = orig_end;
        }
        return ret;
}
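
/*
 * Example (illustrative): walking the busy RAM ranges that back a pfn
 * range.  count_pages() is a made-up callback; the walk stops early as
 * soon as the callback returns non-zero.
 *
 *      static int count_pages(unsigned long pfn, unsigned long nr, void *arg)
 *      {
 *              *(unsigned long *)arg += nr;
 *              return 0;
 *      }
 *
 *      unsigned long total = 0;
 *      walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */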

#endif

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
                         resource_size_t size, resource_size_t min,
                         resource_size_t max, resource_size_t align,
                         void (*alignf)(void *, struct resource *,
                                        resource_size_t, resource_size_t),
                         void *alignf_data)
{
        struct resource *this = root->child;

        new->start = root->start;
        /*
         * Skip past an allocated resource that starts at 0, since the assignment
         * of this->start - 1 to new->end below would cause an underflow.
         */
        if (this && this->start == 0) {
                new->start = this->end + 1;
                this = this->sibling;
        }
        for(;;) {
                if (this)
                        new->end = this->start - 1;
                else
                        new->end = root->end;
                if (new->start < min)
                        new->start = min;
                if (new->end > max)
                        new->end = max;
                new->start = ALIGN(new->start, align);
                if (alignf)
                        alignf(alignf_data, new, size, align);
                if (new->start < new->end && new->end - new->start >= size - 1) {
                        new->end = new->start + size - 1;
                        return 0;
                }
                if (!this)
                        break;
                new->start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
}
/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
                      resource_size_t size, resource_size_t min,
                      resource_size_t max, resource_size_t align,
                      void (*alignf)(void *, struct resource *,
                                     resource_size_t, resource_size_t),
                      void *alignf_data)
{
        int err;

        write_lock(&resource_lock);
        err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
        return err;
}

EXPORT_SYMBOL(allocate_resource);
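
/*
 * Example (illustrative): letting the allocator pick any free, 4 KiB
 * aligned, 4 KiB sized slot inside a made-up 0xa0000000-0xbfffffff window.
 * On success foo_win describes the chosen range and has already been
 * inserted under iomem_resource.
 *
 *      static struct resource foo_win = {
 *              .name   = "foo window",
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      if (allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *                            0xa0000000, 0xbfffffff, 0x1000, NULL, NULL))
 *              return -EBUSY;
 */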

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
        int result;
        struct resource *first, *next;

        write_lock(&resource_lock);

        for (;; parent = first) {
                result = 0;
                first = __request_resource(parent, new);
                if (!first)
                        goto out;

                result = -EBUSY;
                if (first == parent)
                        goto out;

                if ((first->start > new->start) || (first->end < new->end))
                        break;
                if ((first->start == new->start) && (first->end == new->end))
                        break;
        }

        for (next = first; ; next = next->sibling) {
                /* Partial overlap? Bad, and unfixable */
                if (next->start < new->start || next->end > new->end)
                        goto out;
                if (!next->sibling)
                        break;
                if (next->sibling->start > new->end)
                        break;
        }

        result = 0;

        new->parent = parent;
        new->sibling = next->sibling;
        new->child = first;

        next->sibling = NULL;
        for (next = first; next; next = next->sibling)
                next->parent = new;

        if (parent->child == first) {
                parent->child = new;
        } else {
                next = parent->child;
                while (next->sibling != first)
                        next = next->sibling;
                next->sibling = new;
        }

 out:
        write_unlock(&resource_lock);
        return result;
}
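
/*
 * Example (illustrative): insert_resource() suits firmware/bus code that
 * learns about an enclosing window only after some regions inside it have
 * already been registered.  With an existing child at 0xfed00000-0xfed003ff,
 * inserting the made-up window below re-parents that child under the new
 * resource instead of failing with -EBUSY the way request_resource() would:
 *
 *      static struct resource host_win = {
 *              .name   = "host bridge window",
 *              .start  = 0xfed00000,
 *              .end    = 0xfedfffff,
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      insert_resource(&iomem_resource, &host_win);
 */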
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
        struct resource *tmp, *parent = res->parent;
        resource_size_t end = start + size - 1;
        int result = -EBUSY;

        write_lock(&resource_lock);

        if ((start < parent->start) || (end > parent->end))
                goto out;

        for (tmp = res->child; tmp; tmp = tmp->sibling) {
                if ((tmp->start < start) || (tmp->end > end))
                        goto out;
        }

        if (res->sibling && (res->sibling->start <= end))
                goto out;

        tmp = parent->child;
        if (tmp != res) {
                while (tmp->sibling != res)
                        tmp = tmp->sibling;
                if (start <= tmp->end)
                        goto out;
        }

        res->start = start;
        res->end = end;
        result = 0;

 out:
        write_unlock(&resource_lock);
        return result;
}

EXPORT_SYMBOL(adjust_resource);
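
/*
 * Example (illustrative): growing the made-up foo_res from the
 * request_resource() example above from 4 KiB to 8 KiB in place:
 *
 *      adjust_resource(&foo_res, 0xfeb00000, 0x2000);
 *
 * The new range must still fit inside the parent, keep covering all of
 * foo_res's children and not run into the next sibling; otherwise -EBUSY
 * is returned and the resource is left untouched.
 */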

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
        case IORESOURCE_SIZEALIGN:
                return res->end - res->start + 1;
        case IORESOURCE_STARTALIGN:
                return res->start;
        default:
                return 0;
        }
}
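
/*
 * Illustration (hypothetical values): for a resource spanning
 * 0xe0000000-0xe0000fff with IORESOURCE_SIZEALIGN set,
 * resource_alignment() returns 0x1000 (its size, i.e. natural alignment);
 * with IORESOURCE_STARTALIGN it would instead return the current start
 * address, and with neither flag set it returns 0 ("no valid alignment").
 */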

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 */
struct resource * __request_region(struct resource *parent,
                                   resource_size_t start, resource_size_t n,
                                   const char *name)
{
        struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (res) {
                res->name = name;
                res->start = start;
                res->end = start + n - 1;
                res->flags = IORESOURCE_BUSY;

                write_lock(&resource_lock);

                for (;;) {
                        struct resource *conflict;

                        conflict = __request_resource(parent, res);
                        if (!conflict)
                                break;
                        if (conflict != parent) {
                                parent = conflict;
                                if (!(conflict->flags & IORESOURCE_BUSY))
                                        continue;
                        }

                        /* Uhhuh, that didn't work out.. */
                        kfree(res);
                        res = NULL;
                        break;
                }
                write_unlock(&resource_lock);
        }
        return res;
}
EXPORT_SYMBOL(__request_region);
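
/*
 * Example (illustrative): drivers normally go through the request_region()
 * and request_mem_region() wrappers from <linux/ioport.h>, which call
 * __request_region() with &ioport_resource / &iomem_resource as the parent:
 *
 *      if (!request_region(0x378, 8, "parport0"))
 *              return -EBUSY;
 *      ...
 *      release_region(0x378, 8);
 *
 * 0x378/8 is the classic parallel-port range, used here purely as an
 * example.
 */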

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
                   resource_size_t n)
{
        struct resource * res;

        res = __request_region(parent, start, n, "check-region");
        if (!res)
                return -EBUSY;

        release_resource(res);
        kfree(res);
        return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
                      resource_size_t n)
{
        struct resource **p;
        resource_size_t end;

        p = &parent->child;
        end = start + n - 1;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *res = *p;

                if (!res)
                        break;
                if (res->start <= start && res->end >= end) {
                        if (!(res->flags & IORESOURCE_BUSY)) {
                                p = &res->child;
                                continue;
                        }
                        if (res->start != start || res->end != end)
                                break;
                        *p = res->sibling;
                        write_unlock(&resource_lock);
                        kfree(res);
                        return;
                }
                p = &res->sibling;
        }

        write_unlock(&resource_lock);

        printk(KERN_WARNING "Trying to free nonexistent resource "
                "<%016llx-%016llx>\n", (unsigned long long)start,
                (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
        struct resource *parent;
        resource_size_t start;
        resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
        struct region_devres *this = res;

        __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
        struct region_devres *this = res, *match = match_data;

        return this->parent == match->parent &&
                this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
                                struct resource *parent, resource_size_t start,
                                resource_size_t n, const char *name)
{
        struct region_devres *dr = NULL;
        struct resource *res;

        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
                          GFP_KERNEL);
        if (!dr)
                return NULL;

        dr->parent = parent;
        dr->start = start;
        dr->n = n;

        res = __request_region(parent, start, n, name);
        if (res)
                devres_add(dev, dr);
        else
                devres_free(dr);

        return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        __release_region(parent, start, n);
        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
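
/*
 * Example (illustrative): the devm_request_mem_region() wrapper from
 * <linux/ioport.h> funnels into __devm_request_region() with
 * &iomem_resource as the parent.  In a probe() routine (dev, base and
 * "foo regs" are made up) the region is released automatically by the
 * devres core when the device is unbound, so the error and remove paths
 * need no explicit release_mem_region():
 *
 *      if (!devm_request_mem_region(dev, base, 0x1000, "foo regs"))
 *              return -EBUSY;
 */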

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                int io_start, io_num;
                int x = reserved;

                if (get_option (&str, &io_start) != 2)
                        break;
                if (get_option (&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;
                        res->name = "reserved";
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags = IORESOURCE_BUSY;
                        res->child = NULL;
                        if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
                                reserved = x+1;
                }
        }
        return 1;
}

__setup("reserve=", reserve_setup);
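
/*
 * Example (illustrative): booting with
 *
 *      reserve=0x330,0x20
 *
 * marks I/O ports 0x330-0x34f busy so no driver can claim them; a start
 * value of 0x10000 or above is treated as a memory address and reserved
 * under iomem_resource instead.  Up to MAXRESERVE ranges can be given as
 * comma-separated start,size pairs.
 */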