[linux-2.6] / mm / hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
        int i;

        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
                clear_user_highpage(page + i, addr);
        }
}

static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
{
        int i;

        might_sleep();
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}

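/*
 * Take a free huge page off the first node in the zonelist for this VMA
 * and fault address that both has free huge pages and is allowed by the
 * current task's cpuset.  Returns NULL if no suitable page is available.
 */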
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
{
        int nid = numa_node_id();
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, address);
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
                    !list_empty(&hugepage_freelists[nid]))
                        break;
        }

        if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}

static void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
}

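/*
 * Allocate a fresh huge page from the buddy allocator, round-robining
 * across the online nodes.  The compound page destructor is set to
 * free_huge_page() so the final put_page() drops it onto the free list.
 * Returns 1 on success, 0 on failure.
 */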
static int alloc_fresh_huge_page(void)
{
        static int nid = 0;
        struct page *page;
        page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        nid = next_node(nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
                return 1;
        }
        return 0;
}

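/*
 * Allocate a huge page from the free pool for the given VMA and fault
 * address.  Shared (VM_MAYSHARE) mappings consume one of their reserved
 * pages; private mappings may only use pages that are not reserved.
 */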
static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr)
{
        struct page *page;

        spin_lock(&hugetlb_lock);
        if (vma->vm_flags & VM_MAYSHARE)
                resv_huge_pages--;
        else if (free_huge_pages <= resv_huge_pages)
                goto fail;

        page = dequeue_huge_page(vma, addr);
        if (!page)
                goto fail;

        spin_unlock(&hugetlb_lock);
        set_page_refcounted(page);
        return page;

fail:
        spin_unlock(&hugetlb_lock);
        return NULL;
}

static int __init hugetlb_init(void)
{
        unsigned long i;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n",
                        free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
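/*
 * Hand a huge page back to the buddy allocator: adjust the pool
 * counters, clear the page flags the allocator expects to be clean,
 * reset the compound destructor kept in the first tail page, then free
 * the whole HUGETLB_PAGE_ORDER block.  Called with hugetlb_lock held.
 */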
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        page[1].lru.next = NULL;
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

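/*
 * Resize the huge page pool to "count" pages: allocate fresh pages
 * while the pool is too small, and free surplus pages back to the buddy
 * allocator while it is too large, never shrinking below the reserved
 * page count.  Returns the resulting pool size.
 */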
static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                if (!alloc_fresh_huge_page())
                        return nr_huge_pages;
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        count = max(count, resv_huge_pages);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        resv_huge_pages,
                        HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                unsigned long address, int *unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                                int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(*ptep));
        ptep_set_access_flags(vma, address, ptep, entry, 1);
        update_mmu_cache(vma, address, entry);
        lazy_mmu_prot_update(entry);
}

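/*
 * Copy the huge page table entries from a parent to a child mm at fork
 * time, taking a reference on each mapped page.  For private mappings
 * that may be written (COW), the parent's PTEs are write-protected so
 * that both processes fault and copy on the next write.
 */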
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        if (cow)
                                ptep_set_wrprotect(src, addr, src_pte);
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

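/*
 * Tear down the huge page table entries in [start, end), gathering the
 * pages on a local list and dropping their references only after the
 * TLB has been flushed.  The caller must hold the file's i_mmap_lock.
 */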
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
        struct page *tmp;
        /*
         * A page gathering list, protected by per file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
        }
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        /*
         * It is undesirable to test vma->vm_file as it should be non-null
         * for a valid hugetlb area. However, vm_file will be NULL in the
         * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
         * method fails, do_mmap_pgoff() nullifies vma->vm_file before
         * calling this function to clean up. Since no pte has actually
         * been set up, it is safe to do nothing in this case.
         */
        if (vma->vm_file) {
                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
                __unmap_hugepage_range(vma, start, end);
                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
        }
}

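/*
 * Handle a copy-on-write fault on a huge page.  If the faulting task is
 * the only user of the page it is simply made writable; otherwise a new
 * huge page is allocated and the contents are copied.  Called with
 * mm->page_table_lock held; the lock is dropped around the copy and
 * retaken before the pte is updated.
 */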
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
{
        struct page *old_page, *new_page;
        int avoidcopy;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return VM_FAULT_MINOR;
        }

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);

        if (!new_page) {
                page_cache_release(old_page);
                return VM_FAULT_OOM;
        }

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return VM_FAULT_MINOR;
}

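/*
 * Handle a fault on a huge pte that is not present: look the page up in
 * the page cache, allocating and zeroing a new huge page if necessary,
 * then install the pte.  The page lock guards against a racing
 * truncation until page_table_lock is taken.
 */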
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                if (idx >= size)
                        goto out;
                if (hugetlb_get_quota(mapping))
                        goto out;
                page = alloc_huge_page(vma, address);
                if (!page) {
                        hugetlb_put_quota(mapping);
                        ret = VM_FAULT_OOM;
                        goto out;
                }
                clear_huge_page(page, address);

                if (vma->vm_flags & VM_SHARED) {
                        int err;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
                                hugetlb_put_quota(mapping);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
                } else
                        lock_page(page);
        }

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = VM_FAULT_MINOR;
        if (!pte_none(*ptep))
                goto backout;

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}

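/*
 * Main huge page fault entry point, called from handle_mm_fault().
 * Faults on a not-present pte are handed to hugetlb_no_page(); write
 * faults on a present, read-only pte go to hugetlb_cow().
 */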
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);

        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = *ptep;
        if (pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
        }

        ret = VM_FAULT_MINOR;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

        return ret;
}

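/*
 * get_user_pages() back end for hugetlb mappings: walk the range,
 * faulting in huge pages as needed, and fill the pages[] and vmas[]
 * arrays with one entry per base (PAGE_SIZE) page.
 */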
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (ret == VM_FAULT_MINOR)
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
                page = pte_page(*pte);
same_page:
                if (pages) {
                        get_page(page);
                        pages[i] = page + pfn_offset;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++pfn_offset;
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
                                pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
                        goto same_page;
                }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
                        lazy_mmu_prot_update(pte);
                }
        }
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);
}

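/*
 * Huge page reservations are tracked as a list of file_region entries
 * hanging off the hugetlbfs inode's mapping->private_list.  Each entry
 * records a reserved range of the file in huge page units.
 */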
struct file_region {
        struct list_head link;
        long from;
        long to;
};

static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

static int hugetlb_acct_memory(long delta)
{
        int ret = -ENOMEM;

        spin_lock(&hugetlb_lock);
        if ((delta + resv_huge_pages) <= free_huge_pages) {
                resv_huge_pages += delta;
                ret = 0;
        }
        spin_unlock(&hugetlb_lock);
        return ret;
}

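/*
 * Reserve huge pages for the range [from, to) of a hugetlbfs file at
 * mmap() time, so that later faults cannot fail for lack of pages.
 * Only the pages not already covered by an existing region are charged
 * against the free pool.
 */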
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
        long ret, chg;

        chg = region_chg(&inode->i_mapping->private_list, from, to);
        if (chg < 0)
                return chg;
        ret = hugetlb_acct_memory(chg);
        if (ret < 0)
                return ret;
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
        hugetlb_acct_memory(freed - chg);
}