SLUB: direct pass through of page size or higher kmalloc requests
[linux-2.6] / include/asm-generic/memory_model.h
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET         (0UL)
#endif
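/*
 * ARCH_PFN_OFFSET is the pfn of the first page described by mem_map; it
 * only needs overriding on flat-memory machines whose RAM does not start
 * at physical address 0 (an illustrative value: 0x80000UL for RAM that
 * begins at 2GB with 4KB pages).
 */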

#elif defined(CONFIG_DISCONTIGMEM)

#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)    pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)        \
        ((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn)      (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + \
                                 ARCH_PFN_OFFSET)
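/*
 * Worked example with illustrative numbers: with ARCH_PFN_OFFSET == 0x80000,
 * __pfn_to_page(0x80005) is mem_map + 5 and __page_to_pfn(mem_map + 5)
 * gives back 0x80005, so the two macros are exact inverses over the single
 * flat mem_map[] array.
 */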
#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn)                      \
({      unsigned long __pfn = (pfn);            \
        unsigned long __nid = arch_pfn_to_nid(__pfn);  \
        NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)                                               \
({      struct page *__pg = (pg);                                       \
        struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));     \
        (unsigned long)(__pg - __pgdat->node_mem_map) +                 \
         __pgdat->node_start_pfn;                                       \
})
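/*
 * Worked example with illustrative numbers, assuming pfn_to_nid() places
 * pfn 0x40003 on node 1 and NODE_DATA(1)->node_start_pfn == 0x40000:
 * __pfn_to_page(0x40003) is NODE_DATA(1)->node_mem_map + 3, and
 * __page_to_pfn() of that page adds node_start_pfn back to return 0x40003.
 */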

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)      (vmemmap + (pfn))
#define __page_to_pfn(page)     ((page) - vmemmap)
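/*
 * Here vmemmap is a single virtually contiguous struct page array covering
 * the whole pfn space, so e.g. __pfn_to_page(0x1234) is simply
 * &vmemmap[0x1234]; the array's own backing pages are only populated for
 * sections that actually exist.
 */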

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)                                       \
({      struct page *__pg = (pg);                               \
        int __sec = page_to_section(__pg);                      \
        (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})

#define __pfn_to_page(pfn)                              \
({      unsigned long __pfn = (pfn);                    \
        struct mem_section *__sec = __pfn_to_section(__pfn);    \
        __section_mem_map_addr(__sec) + __pfn;          \
})
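/*
 * Worked example with illustrative numbers: for a section starting at pfn
 * 0x8000 whose struct page array lives at mem_map, the section stores
 * section_mem_map == mem_map - 0x8000.  __pfn_to_page(0x8002) then
 * evaluates to (mem_map - 0x8000) + 0x8002 == mem_map + 2, and
 * __page_to_pfn(mem_map + 2) evaluates to
 * (mem_map + 2) - (mem_map - 0x8000) == 0x8002, with no separate
 * start_pfn lookup needed.
 */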
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page;
/* this is useful when the inlined pfn_to_page is too big */
extern struct page *pfn_to_page(unsigned long pfn);
extern unsigned long page_to_pfn(struct page *page);
#else
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
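/*
 * Either way the interface seen by callers is identical; a minimal usage
 * sketch (illustrative only):
 *
 *      struct page *page = pfn_to_page(pfn);
 *      BUG_ON(page_to_pfn(page) != pfn);
 */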

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif