linux-2.6: include/linux/hugetlb.h
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct ctl_table;

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_HUGETLB;
}

int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);

extern unsigned long max_huge_pages;
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int write);
int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len)   0
#endif

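/*
 * Unless the architecture overrides it, freeing the page tables behind a
 * huge-page VMA goes through the generic free_pgd_range().
 */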
#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range  free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);
#endif

#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
                                                pgoff_t pgoff)
{
        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
                return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len,
                                                pgoff_t pgoff);
#endif

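/*
 * Architectures without dedicated huge-pte accessors fall back to the
 * ordinary set_pte_at()/ptep_get_and_clear() helpers.
 */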
#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte)    set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep);
#endif

#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm)          do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif

#else /* !CONFIG_HUGETLB_PAGE */

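/*
 * Stubs so callers compile with hugetlb support disabled.  The BUG()
 * variants guard paths that are only reached for hugetlb VMAs or files,
 * which cannot exist in this configuration.
 */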
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
        return 0;
}
static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i)     ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)          ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end)   BUG()
#define hugetlb_report_meminfo(buf)             0
#define hugetlb_report_node_meminfo(n, buf)     0
#define follow_huge_pmd(mm, addr, pmd, write)   NULL
#define prepare_hugepage_range(addr,len,pgoff)  (-EINVAL)
#define pmd_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)     ({ BUG(); 0; })

#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK      PAGE_MASK               /* Keep the compiler happy */
#define HPAGE_SIZE      PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLBFS
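/* Mount options parsed for a hugetlbfs instance. */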
struct hugetlbfs_config {
        uid_t   uid;
        gid_t   gid;
        umode_t mode;
        long    nr_blocks;
        long    nr_inodes;
};

struct hugetlbfs_sb_info {
        long    max_blocks;   /* blocks allowed */
        long    free_blocks;  /* blocks free */
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);

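/*
 * True for files on hugetlbfs and for SysV shared memory segments that
 * were created with SHM_HUGETLB.
 */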
static inline int is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return 1;
        if (is_file_shm_hugepages(file))
                return 1;

        return 0;
}

static inline void set_file_hugepages(struct file *file)
{
        file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)         0
#define set_file_hugepages(file)        BUG()
#define hugetlb_file_setup(name,size)   ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */

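/*
 * Architectures that define HAVE_ARCH_HUGETLB_UNMAPPED_AREA supply their
 * own placement policy for huge-page mappings.
 */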
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#endif /* _LINUX_HUGETLB_H */