#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
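
/*
 * Each struct page_cgroup shadows one struct page and records which
 * memory cgroup (if any) the page frame is charged to.  Reset the
 * tracking fields and bind the entry to its page frame.
 */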
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}
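
/* total bytes allocated for page_cgroup arrays, reported at boot */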
static unsigned long total_usage;
#if !defined(CONFIG_SPARSEMEM)
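
/*
 * Without SPARSEMEM, page_cgroup entries live in one contiguous array
 * per node, covering node_spanned_pages frames and indexed by the pfn
 * offset from node_start_pfn.
 */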
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}
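
/*
 * Map a page to its page_cgroup by indexing the node's array with the
 * pfn offset.  Returns NULL if the array has not been allocated yet.
 */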
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
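
/*
 * Allocate and initialize the per-node array from bootmem.  One entry
 * is created for every page frame the node spans, including holes.
 */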
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_subsys.disabled)
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}
#else	/* CONFIG_FLAT_NODE_MEM_MAP */
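
/*
 * With SPARSEMEM, each memory section carries its own page_cgroup
 * array.  section->page_cgroup stores the array base minus the
 * section's first pfn, so a lookup can index it with the raw pfn
 * without subtracting the section start.
 */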
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}
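
/*
 * Allocate and initialize the page_cgroup array for one section.  This
 * runs both at early boot (from bootmem, before slab is up) and at
 * memory hotplug time (from kmalloc, falling back to vmalloc).
 */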
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we have to
		 * initialize it again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
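
/*
 * Undo init_section_page_cgroup() for one section.  The array may have
 * come from vmalloc, kmalloc or bootmem; bootmem pages are marked
 * PageReserved and are never freed here.
 */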
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* Is bootmem ? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}
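
/*
 * Prepare page_cgroup arrays for a range that is about to come online.
 * The range is widened to whole sections; on any failure, everything
 * allocated so far is rolled back.
 */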
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}
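
/* Release the page_cgroup arrays covering a range that went offline. */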
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}
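
/*
 * Memory-hotplug notifier: allocate arrays in MEM_GOING_ONLINE so the
 * operation can still be refused on failure, and free them only after
 * MEM_OFFLINE has actually succeeded.
 */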
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	ret = notifier_from_errno(ret);
	return ret;
}

#endif
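
/*
 * Walk every present section up to max_pfn at boot, set up its
 * page_cgroup array, then register the hotplug notifier.
 */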
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_subsys.disabled)
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
}
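
/* With SPARSEMEM, state is kept per section; nothing per-node to set up. */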
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif