#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
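
/*
 * Maintains a struct page_cgroup for every page frame so the memory
 * cgroup controller can track which cgroup a page is charged to, plus
 * (optionally) a swap_cgroup map that remembers the owning mem_cgroup
 * for pages that have been swapped out.
 */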

static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)
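
/*
 * With a flat/discontig memory model there is one linear page_cgroup
 * array per node, allocated at boot and indexed by (pfn - node_start_pfn).
 */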
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
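
/*
 * Allocate and initialize one page_cgroup entry for every page frame
 * spanned by the node; runs early in boot, so bootmem is used.
 */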
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
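
/*
 * Boot-time entry point for the flat case: walk every online node and
 * allocate its table, or give up entirely if any allocation fails.
 */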
void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
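
/*
 * With sparsemem, each mem_section carries its own page_cgroup array,
 * stored offset by the section's base so it can be indexed directly by pfn.
 */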
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}

/* __alloc_bootmem...() is protected by !slab_is_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed. So, we have to
		 * initialize again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
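/*
 * Memory hotplug support: page_cgroup tables are created when a memory
 * section goes online and torn down again when it goes offline.
 */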
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* Is bootmem ? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback: free everything allocated so far */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;
	return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
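
/*
 * Boot-time entry point for the sparsemem case: walk all present
 * sections up to max_pfn and allocate their page_cgroup arrays, then
 * register the hotplug notifier so later online/offline events are
 * handled too.
 */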
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
		" want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);

struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

/*
 * These eight bytes per entry seem big; maybe we can shrink this once
 * cgroups can be referred to by an id rather than a pointer.
 */
struct swap_cgroup {
	struct mem_cgroup	*val;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
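
/*
 * Example: with 4KiB pages and 8-byte entries, SC_PER_PAGE is 512, so
 * swap offset 1000 lives in map[1000 / 512] = map[1] at position
 * 1000 & 511 = 488 within that page.
 */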

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's
 * charge/uncharge operations against SwapCache. At swap_free(), it is
 * accessed directly from the swap code.
 *
 * This means:
 * - there is no race in "exchange" when we are reached via SwapCache,
 *   because the SwapCache (and its swp_entry) is under lock.
 * - when called via swap_free(), there is no other user of the entry,
 *   so again there is no race.
 * Therefore, no lock is needed around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * Allocate the per-swapfile buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	if (!do_swap_account)
		return 0;
	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);
	return -ENOMEM;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @mem: mem_cgroup to be recorded
 *
 * Returns the old value on success, NULL on failure.
 * (Of course, the old value can itself be NULL.)
 */
struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	struct mem_cgroup *old;

	if (!do_swap_account)
		return NULL;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	old = sc->val;
	sc->val = mem;

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns a pointer to the mem_cgroup on success, NULL on failure.
 */
struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	struct mem_cgroup *ret;

	if (!do_swap_account)
		return NULL;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->val;
	return ret;
}
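
/*
 * Called from the swapon path: size and allocate the pointer array for
 * this swap type, then populate it via swap_cgroup_prepare().
 */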
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = ((max_pages/SC_PER_PAGE) + 1);
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		vfree(array);
		mutex_unlock(&swap_cgroup_mutex);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	printk(KERN_INFO
		"swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
		" and %ld bytes to hold mem_cgroup pointers on swap\n",
		array_size, length * PAGE_SIZE);
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option.\n");

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option\n");
	return -ENOMEM;
}
void swap_cgroup_swapoff(int type)
{
	int i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */