/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
                       (unsigned long long)res->start,
                       (unsigned long long)res->end);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
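/*
 * Hook a newly added section's pages into the zone and initialize their
 * struct pages.  The zone itself is initialized lazily:
 * init_currently_empty_zone() runs only the first time pages are added
 * to a previously empty zone (detected via the missing wait table).
 */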
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;
                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

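/*
 * Add one sparsemem section: allocate its mem_map via
 * sparse_add_one_section(), hook it into the zone, and register the new
 * memory block with the driver core so it appears under sysfs.
 */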
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        /* when initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the ioresource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning is printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

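/*
 * Extend the zone's [zone_start_pfn, zone_start_pfn + spanned_pages)
 * span to cover the newly onlined range.  Callers must hold the pgdat
 * resize lock; the zone span seqlock is taken here.
 */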
static void grow_zone_span(struct zone *zone,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

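/* As above, but for the node's span; also called under the resize lock. */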
static void grow_pgdat_span(struct pglist_data *pgdat,
                unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

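/*
 * walk_memory_resource() callback: online every page in the chunk and
 * accumulate the number of onlined pages through *arg.
 */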
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        online_page(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}

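/*
 * Bring a previously added range of pages online, i.e. make it
 * available to the page allocator.  Typically reached by writing
 * "online" to a memory block's sysfs "state" file.  The sequence:
 * notify listeners (MEM_GOING_ONLINE), grow the zone/node spans, free
 * the pages to the buddy allocator, and rebuild the zonelists if the
 * zone was previously empty.
 */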
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;

        nid = page_to_nid(pfn_to_page(pfn));
        if (node_present_pages(nid) == 0)
                arg.status_change_nid = nid;

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                return ret;
        }
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_sem.
         */
        zone = page_zone(pfn_to_page(pfn));
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, pfn, pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        /*
         * If this zone is not populated, it is not in the zonelists and
         * the page allocator ignores it.  So the zonelists must be
         * rebuilt after onlining.
         */
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;

        walk_memory_resource(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;

        setup_per_zone_pages_min();
        if (onlined_pages) {
                kswapd_run(zone_to_nid(zone));
                node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
        }

        if (need_zonelists_rebuild)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

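/*
 * Allocate and minimally initialize a pglist_data for a node that is
 * being brought up by memory hot-add.  All zones start out empty; the
 * real sizes are established later, when the pages are onlined.
 */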
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init the node's zones as empty zones; there are no present pages yet */
        free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
}

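/*
 * Top-level entry point for memory hot-add.  A sketch of the typical
 * flow (the exact caller depends on the platform): the ACPI memory
 * hotplug driver calls add_memory(nid, start, size), which reserves the
 * iomem resource, creates the node's pgdat if needed, and hands off to
 * arch_add_memory(), which on most architectures ends up in
 * __add_pages() above.  The new pages must still be brought online
 * separately via online_pages().
 */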
int add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (!res)
                return -EEXIST;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat) {
                        ret = -ENOMEM;
                        goto error;
                }
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);

        if (ret < 0)
                goto error;

        /* we online the node here; we can't roll back from here on */
        node_set_online(nid);

        cpuset_track_online_nodes();

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file for the new node can't be created,
                 * CPUs on the node can't be hot-added.  There is no way
                 * to roll back at this point, so catch it with BUG_ON(),
                 * reluctantly.
                 */
                BUG_ON(ret);
        }

        return ret;
error:
        /* roll back the pgdat allocation and the resource registration */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to
 * the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check. */
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.  Scan pfns
 * from start to end and return the first pfn of a page that is on the
 * LRU, or 0 if none is found.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
                        unsigned long private,
                        int **x)
{
        /* This should be improved!! */
        return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

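/*
 * Migrate pages out of the range being offlined, at most
 * NR_OFFLINE_AT_ONCE_PAGES at a time.  Pages are isolated from the LRU
 * and handed to migrate_pages(); pages that are in use but not on the
 * LRU make the range unremovable (-EBUSY).
 */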
#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!page_count(page))
                        continue;
                /*
                 * We can skip free pages, and we can only deal with
                 * pages that are on the LRU.
                 */
                ret = isolate_lru_page(page, &source);
                if (!ret) { /* Success */
                        move_pages--;
                } else {
                        /*
                         * Because we don't hold the big zone->lock, we
                         * have to check the page count again here.
                         */
                        if (page_count(page))
                                not_managed++;
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_INFO "removing from LRU failed"
                                         " %lx/%d/%lx\n",
                                pfn, page_count(page), page->flags);
#endif
                }
        }
        ret = -EBUSY;
        if (not_managed) {
                if (!list_empty(&source))
                        putback_lru_pages(&source);
                goto out;
        }
        ret = 0;
        if (list_empty(&source))
                goto out;
        /* migrate_pages() returns the number of failed pages */
        ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
        return ret;
}

/*
 * Remove all isolated pages in the range from free_area[] and mark
 * them Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, as recorded in the memory
 * resource tree, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = nr_pages;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

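/*
 * Offline a pageblock-aligned range of pages.  The overall sequence:
 * isolate the range so the allocator stops handing out its pages,
 * notify listeners (MEM_GOING_OFFLINE), repeatedly migrate away any LRU
 * pages until the range is empty or the timeout expires, drain per-cpu
 * lists, verify everything is isolated, and finally pull the pages out
 * of the free lists for good.
 */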
int offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        struct zone *zone;
        struct memory_notify arg;

        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /*
         * Requiring the whole range to be in one zone makes hotplug much
         * easier (and the code more readable); assume this for now.
         */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        /* set the above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
                return ret;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;
        if (nr_pages >= node_present_pages(node))
                arg.status_change_nid = node;

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                flush_scheduled_work();
                cond_resched();
                drain_all_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have pages on the LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zones' LRU pagevecs; this is asynchronous... */
        lru_add_drain_all();
        flush_scheduled_work();
        yield();
        /* drain pcp pages; this is synchronous */
        drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /*
         * OK, all of our target pages are isolated.
         * We cannot roll back from this point.
         */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pageblock flags; the migrate type becomes MOVABLE again */
        undo_isolate_page_range(start_pfn, end_pfn);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;
        num_physpages -= offlined_pages;

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining %lx to %lx failed\n",
                start_pfn, end_pfn);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* push the pages back to the free area */
        undo_isolate_page_range(start_pfn, end_pfn);

        return ret;
}
#else
int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */