/*
 * Source: linux-2.6 fs/proc/vmcore.c
 * (snapshot from merge of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile 'for-linus')
 */
1 /*
2  *      fs/proc/vmcore.c Interface for accessing the crash
3  *                               dump from the system's previous life.
4  *      Heavily borrowed from fs/proc/kcore.c
5  *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
6  *      Copyright (C) IBM Corporation, 2004. All rights reserved
7  *
8  */
9
10 #include <linux/mm.h>
11 #include <linux/proc_fs.h>
12 #include <linux/user.h>
13 #include <linux/elf.h>
14 #include <linux/elfcore.h>
15 #include <linux/highmem.h>
16 #include <linux/bootmem.h>
17 #include <linux/init.h>
18 #include <linux/crash_dump.h>
19 #include <linux/list.h>
20 #include <asm/uaccess.h>
21 #include <asm/io.h>
22
23 /* List representing chunks of contiguous memory areas and their offsets in
24  * vmcore file.
25  */
26 static LIST_HEAD(vmcore_list);
27
28 /* Stores the pointer to the buffer containing kernel elf core headers. */
29 static char *elfcorebuf;
30 static size_t elfcorebuf_sz;
31
32 /* Total size of vmcore file. */
33 static u64 vmcore_size;
34
35 struct proc_dir_entry *proc_vmcore = NULL;
36
37 /* Reads a page from the oldmem device from given offset. */
38 static ssize_t read_from_oldmem(char *buf, size_t count,
39                                 u64 *ppos, int userbuf)
40 {
41         unsigned long pfn, offset;
42         size_t nr_bytes;
43         ssize_t read = 0, tmp;
44
45         if (!count)
46                 return 0;
47
48         offset = (unsigned long)(*ppos % PAGE_SIZE);
49         pfn = (unsigned long)(*ppos / PAGE_SIZE);
50         if (pfn > saved_max_pfn)
51                 return -EINVAL;
52
53         do {
54                 if (count > (PAGE_SIZE - offset))
55                         nr_bytes = PAGE_SIZE - offset;
56                 else
57                         nr_bytes = count;
58
59                 tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
60                 if (tmp < 0)
61                         return tmp;
62                 *ppos += nr_bytes;
63                 count -= nr_bytes;
64                 buf += nr_bytes;
65                 read += nr_bytes;
66                 ++pfn;
67                 offset = 0;
68         } while (count);
69
70         return read;
71 }
72
73 /* Maps vmcore file offset to respective physical address in memroy. */
74 static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
75                                         struct vmcore **m_ptr)
76 {
77         struct vmcore *m;
78         u64 paddr;
79
80         list_for_each_entry(m, vc_list, list) {
81                 u64 start, end;
82                 start = m->offset;
83                 end = m->offset + m->size - 1;
84                 if (offset >= start && offset <= end) {
85                         paddr = m->paddr + offset - start;
86                         *m_ptr = m;
87                         return paddr;
88                 }
89         }
90         *m_ptr = NULL;
91         return 0;
92 }
93
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	/* Zero-length read or position at/past end of the virtual file. */
	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header (served from the in-kernel elfcorebuf copy,
	 * not from old memory). */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Translate the file position to a physical address and the memory
	 * chunk (curr_m) containing it. */
	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	/* Cap each pass at the next page boundary... */
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		/* userbuf=1: read_from_oldmem copies to user space and
		 * advances start by tsz on success. */
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		/* Current chunk exhausted: advance to the next one, or stop
		 * at the end of the chunk list. */
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/*EOF*/
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
		/* Recompute the pass size for the (possibly new) position:
		 * page boundary first, then remaining bytes in the chunk. */
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}
164
/* /proc/vmcore supports only sequential reads of the dump image. */
const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
};
168
169 static struct vmcore* __init get_new_element(void)
170 {
171         struct vmcore *p;
172
173         p = kmalloc(sizeof(*p), GFP_KERNEL);
174         if (p)
175                 memset(p, 0, sizeof(*p));
176         return p;
177 }
178
179 static u64 __init get_vmcore_size_elf64(char *elfptr)
180 {
181         int i;
182         u64 size;
183         Elf64_Ehdr *ehdr_ptr;
184         Elf64_Phdr *phdr_ptr;
185
186         ehdr_ptr = (Elf64_Ehdr *)elfptr;
187         phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
188         size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
189         for (i = 0; i < ehdr_ptr->e_phnum; i++) {
190                 size += phdr_ptr->p_memsz;
191                 phdr_ptr++;
192         }
193         return size;
194 }
195
196 static u64 __init get_vmcore_size_elf32(char *elfptr)
197 {
198         int i;
199         u64 size;
200         Elf32_Ehdr *ehdr_ptr;
201         Elf32_Phdr *phdr_ptr;
202
203         ehdr_ptr = (Elf32_Ehdr *)elfptr;
204         phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
205         size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
206         for (i = 0; i < ehdr_ptr->e_phnum; i++) {
207                 size += phdr_ptr->p_memsz;
208                 phdr_ptr++;
209         }
210         return size;
211 }
212
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		/* Copy the whole notes segment out of old memory so the
		 * individual note entries can be walked. */
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* Sum the real size of the notes: each entry is the Nhdr
		 * plus name and desc, both rounded up to 4 bytes.  A zero
		 * n_namesz terminates the list early. */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* NOTE(review): p_offset of a crash-generated note header is
		 * treated as a physical address here - confirm against the
		 * tool that builds these headers (kexec). */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	/* Merged notes start right after the ELF header and the shrunken
	 * program header table (all PT_NOTEs collapsed into one). */
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = note_off;
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
293
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		/* Copy the whole notes segment out of old memory so the
		 * individual note entries can be walked. */
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* Sum the real size of the notes: each entry is the Nhdr
		 * plus name and desc, both rounded up to 4 bytes.  A zero
		 * n_namesz terminates the list early. */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* NOTE(review): p_offset of a crash-generated note header is
		 * treated as a physical address here - confirm against the
		 * tool that builds these headers (kexec). */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	/* Merged notes start right after the ELF header and the shrunken
	 * program header table (all PT_NOTEs collapsed into one). */
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = note_off;
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
374
375 /* Add memory chunks represented by program headers to vmcore list. Also update
376  * the new offset fields of exported program headers. */
377 static int __init process_ptload_program_headers_elf64(char *elfptr,
378                                                 size_t elfsz,
379                                                 struct list_head *vc_list)
380 {
381         int i;
382         Elf64_Ehdr *ehdr_ptr;
383         Elf64_Phdr *phdr_ptr;
384         loff_t vmcore_off;
385         struct vmcore *new;
386
387         ehdr_ptr = (Elf64_Ehdr *)elfptr;
388         phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
389
390         /* First program header is PT_NOTE header. */
391         vmcore_off = sizeof(Elf64_Ehdr) +
392                         (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
393                         phdr_ptr->p_memsz; /* Note sections */
394
395         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
396                 if (phdr_ptr->p_type != PT_LOAD)
397                         continue;
398
399                 /* Add this contiguous chunk of memory to vmcore list.*/
400                 new = get_new_element();
401                 if (!new)
402                         return -ENOMEM;
403                 new->paddr = phdr_ptr->p_offset;
404                 new->size = phdr_ptr->p_memsz;
405                 list_add_tail(&new->list, vc_list);
406
407                 /* Update the program header offset. */
408                 phdr_ptr->p_offset = vmcore_off;
409                 vmcore_off = vmcore_off + phdr_ptr->p_memsz;
410         }
411         return 0;
412 }
413
414 static int __init process_ptload_program_headers_elf32(char *elfptr,
415                                                 size_t elfsz,
416                                                 struct list_head *vc_list)
417 {
418         int i;
419         Elf32_Ehdr *ehdr_ptr;
420         Elf32_Phdr *phdr_ptr;
421         loff_t vmcore_off;
422         struct vmcore *new;
423
424         ehdr_ptr = (Elf32_Ehdr *)elfptr;
425         phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
426
427         /* First program header is PT_NOTE header. */
428         vmcore_off = sizeof(Elf32_Ehdr) +
429                         (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
430                         phdr_ptr->p_memsz; /* Note sections */
431
432         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
433                 if (phdr_ptr->p_type != PT_LOAD)
434                         continue;
435
436                 /* Add this contiguous chunk of memory to vmcore list.*/
437                 new = get_new_element();
438                 if (!new)
439                         return -ENOMEM;
440                 new->paddr = phdr_ptr->p_offset;
441                 new->size = phdr_ptr->p_memsz;
442                 list_add_tail(&new->list, vc_list);
443
444                 /* Update the program header offset */
445                 phdr_ptr->p_offset = vmcore_off;
446                 vmcore_off = vmcore_off + phdr_ptr->p_memsz;
447         }
448         return 0;
449 }
450
451 /* Sets offset fields of vmcore elements. */
452 static void __init set_vmcore_list_offsets_elf64(char *elfptr,
453                                                 struct list_head *vc_list)
454 {
455         loff_t vmcore_off;
456         Elf64_Ehdr *ehdr_ptr;
457         struct vmcore *m;
458
459         ehdr_ptr = (Elf64_Ehdr *)elfptr;
460
461         /* Skip Elf header and program headers. */
462         vmcore_off = sizeof(Elf64_Ehdr) +
463                         (ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);
464
465         list_for_each_entry(m, vc_list, list) {
466                 m->offset = vmcore_off;
467                 vmcore_off += m->size;
468         }
469 }
470
471 /* Sets offset fields of vmcore elements. */
472 static void __init set_vmcore_list_offsets_elf32(char *elfptr,
473                                                 struct list_head *vc_list)
474 {
475         loff_t vmcore_off;
476         Elf32_Ehdr *ehdr_ptr;
477         struct vmcore *m;
478
479         ehdr_ptr = (Elf32_Ehdr *)elfptr;
480
481         /* Skip Elf header and program headers. */
482         vmcore_off = sizeof(Elf32_Ehdr) +
483                         (ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);
484
485         list_for_each_entry(m, vc_list, list) {
486                 m->offset = vmcore_off;
487                 vmcore_off += m->size;
488         }
489 }
490
491 static int __init parse_crash_elf64_headers(void)
492 {
493         int rc=0;
494         Elf64_Ehdr ehdr;
495         u64 addr;
496
497         addr = elfcorehdr_addr;
498
499         /* Read Elf header */
500         rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
501         if (rc < 0)
502                 return rc;
503
504         /* Do some basic Verification. */
505         if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
506                 (ehdr.e_type != ET_CORE) ||
507                 !vmcore_elf_check_arch(&ehdr) ||
508                 ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
509                 ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
510                 ehdr.e_version != EV_CURRENT ||
511                 ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
512                 ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
513                 ehdr.e_phnum == 0) {
514                 printk(KERN_WARNING "Warning: Core image elf header is not"
515                                         "sane\n");
516                 return -EINVAL;
517         }
518
519         /* Read in all elf headers. */
520         elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
521         elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
522         if (!elfcorebuf)
523                 return -ENOMEM;
524         addr = elfcorehdr_addr;
525         rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
526         if (rc < 0) {
527                 kfree(elfcorebuf);
528                 return rc;
529         }
530
531         /* Merge all PT_NOTE headers into one. */
532         rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
533         if (rc) {
534                 kfree(elfcorebuf);
535                 return rc;
536         }
537         rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
538                                                         &vmcore_list);
539         if (rc) {
540                 kfree(elfcorebuf);
541                 return rc;
542         }
543         set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
544         return 0;
545 }
546
547 static int __init parse_crash_elf32_headers(void)
548 {
549         int rc=0;
550         Elf32_Ehdr ehdr;
551         u64 addr;
552
553         addr = elfcorehdr_addr;
554
555         /* Read Elf header */
556         rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
557         if (rc < 0)
558                 return rc;
559
560         /* Do some basic Verification. */
561         if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
562                 (ehdr.e_type != ET_CORE) ||
563                 !elf_check_arch(&ehdr) ||
564                 ehdr.e_ident[EI_CLASS] != ELFCLASS32||
565                 ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
566                 ehdr.e_version != EV_CURRENT ||
567                 ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
568                 ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
569                 ehdr.e_phnum == 0) {
570                 printk(KERN_WARNING "Warning: Core image elf header is not"
571                                         "sane\n");
572                 return -EINVAL;
573         }
574
575         /* Read in all elf headers. */
576         elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
577         elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
578         if (!elfcorebuf)
579                 return -ENOMEM;
580         addr = elfcorehdr_addr;
581         rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
582         if (rc < 0) {
583                 kfree(elfcorebuf);
584                 return rc;
585         }
586
587         /* Merge all PT_NOTE headers into one. */
588         rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
589         if (rc) {
590                 kfree(elfcorebuf);
591                 return rc;
592         }
593         rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
594                                                                 &vmcore_list);
595         if (rc) {
596                 kfree(elfcorebuf);
597                 return rc;
598         }
599         set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
600         return 0;
601 }
602
603 static int __init parse_crash_elf_headers(void)
604 {
605         unsigned char e_ident[EI_NIDENT];
606         u64 addr;
607         int rc=0;
608
609         addr = elfcorehdr_addr;
610         rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
611         if (rc < 0)
612                 return rc;
613         if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
614                 printk(KERN_WARNING "Warning: Core image elf header"
615                                         " not found\n");
616                 return -EINVAL;
617         }
618
619         if (e_ident[EI_CLASS] == ELFCLASS64) {
620                 rc = parse_crash_elf64_headers();
621                 if (rc)
622                         return rc;
623
624                 /* Determine vmcore size. */
625                 vmcore_size = get_vmcore_size_elf64(elfcorebuf);
626         } else if (e_ident[EI_CLASS] == ELFCLASS32) {
627                 rc = parse_crash_elf32_headers();
628                 if (rc)
629                         return rc;
630
631                 /* Determine vmcore size. */
632                 vmcore_size = get_vmcore_size_elf32(elfcorebuf);
633         } else {
634                 printk(KERN_WARNING "Warning: Core image elf header is not"
635                                         " sane\n");
636                 return -EINVAL;
637         }
638         return 0;
639 }
640
641 /* Init function for vmcore module. */
642 static int __init vmcore_init(void)
643 {
644         int rc = 0;
645
646         /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
647         if (!(is_vmcore_usable()))
648                 return rc;
649         rc = parse_crash_elf_headers();
650         if (rc) {
651                 printk(KERN_WARNING "Kdump: vmcore not initialized\n");
652                 return rc;
653         }
654
655         /* Initialize /proc/vmcore size if proc is already up. */
656         if (proc_vmcore)
657                 proc_vmcore->size = vmcore_size;
658         return 0;
659 }
660 module_init(vmcore_init)