#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
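
/*
 * Illustrative example of the optimization described above: with a
 * compile-time constant size such as
 *
 *	void *buf = kmalloc(128, GFP_KERNEL);
 *	...
 *	kfree(buf);
 *
 * the inline kmalloc() below collapses into a single kmem_cache_alloc()
 * call on the smallest general cache in malloc_sizes[] that can hold 128
 * bytes.  A size known only at run time falls back to __kmalloc().
 */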

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
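
/*
 * Rough sketch of how the size lookup below works: kmalloc_sizes.h is
 * expected to invoke CACHE(x) once per general cache size in ascending
 * order (the exact size list depends on PAGE_SIZE and L1_CACHE_BYTES),
 * roughly
 *
 *	CACHE(32)
 *	CACHE(64)
 *	CACHE(128)
 *	...
 *
 * With CACHE(x) defined as "if (size <= x) goto found; else i++;", the
 * running index i ends up selecting the matching malloc_sizes[] entry.
 */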

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	return __kmalloc_node(size, flags, node);
}

#endif	/* CONFIG_NUMA */
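
/*
 * Illustrative NUMA usage: allocating from the general caches on a
 * specific node, e.g. the caller's current node,
 *
 *	void *p = kmalloc_node(256, GFP_KERNEL, numa_node_id());
 *
 * follows the same constant-size fast path as kmalloc(), but through
 * kmem_cache_alloc_node() so the object is placed on the requested node.
 */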

extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);

#endif	/* _LINUX_SLAB_DEF_H */