/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */
#define LOAD_OFFSET __START_KERNEL_map
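/*
 * LOAD_OFFSET is the kernel's virtual mapping base (__START_KERNEL_map).
 * Throughout this script, AT(ADDR(section) - LOAD_OFFSET) gives a section a
 * load (physical) address equal to its virtual address minus that base, so
 * the image links at kernel virtual addresses but is loaded low in memory.
 */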
#include <asm-generic/vmlinux.lds.h>
#undef i386 /* in case the preprocessor is a 32-bit one */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
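/*
 * The FLAGS() values below are ELF program header permission bits:
 * 1 = execute, 2 = write, 4 = read; so 5 is R_E, 7 is RWE and 4 is R__.
 */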
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
user PT_LOAD FLAGS(7); /* RWE */
data.init PT_LOAD FLAGS(7); /* RWE */
note PT_NOTE FLAGS(4); /* R__ */
phys_startup_64 = startup_64 - LOAD_OFFSET;
_text = .; /* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
/* First the code that has to be first for bootstrapping */
_etext = .; /* End of text section */
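/*
 * The exception table pairs the address of an instruction that may fault
 * (e.g. a user access in copy_from_user()) with the address of its fixup
 * code; the page fault handler searches this table to recover.
 */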
. = ALIGN(16); /* Exception table */
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
__stop___ex_table = .;
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
__tracedata_start = .;
. = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
_edata = .; /* End of data section */
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
*(.data.cacheline_aligned)
. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
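/*
 * The vsyscall page sits at the fixed virtual address VSYSCALL_ADDR
 * (-10 MB, i.e. 0xffffffffff600000) in every process. VLOAD() gives the
 * load address of a vsyscall section inside the kernel image, while
 * VVIRT() gives its final virtual address inside that fixed mapping.
 */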
.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
__vsyscall_0 = VSYSCALL_VIRT_ADDR;
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
{ *(.vsyscall_gtod_data) }
vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
.vsyscall_clock : AT(VLOAD(.vsyscall_clock))
{ *(.vsyscall_clock) }
vsyscall_clock = VVIRT(.vsyscall_clock);
.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
vgetcpu_mode = VVIRT(.vgetcpu_mode);
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
.jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
jiffies = VVIRT(.jiffies);
.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
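/*
 * The assignment above moves the location counter out of the fixed vsyscall
 * window and back to the normal kernel mapping, just past the page reserved
 * for the vsyscall data; the temporary VSYSCALL_* macros are undefined below.
 */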
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
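/*
 * The initial task's data, including the boot stack in init_thread_union,
 * gets its own section; THREAD_SIZE alignment keeps the stack-pointer
 * masking used to locate thread_info valid for the init stack as well.
 */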
. = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
. = ALIGN(PAGE_SIZE);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
*(.data.page_aligned)
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
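/*
 * .smp_locks records the addresses of lock prefixes so the alternatives
 * code can patch them out when the kernel runs on a single CPU.
 */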
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
. = ALIGN(PAGE_SIZE);
. = ALIGN(PAGE_SIZE); /* Init code and data */
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
__initdata_begin = .;
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
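/*
 * The initcall table collects pointers to functions registered via
 * core_initcall() .. module_init(); do_initcalls() walks the region
 * between __initcall_start and __initcall_end during boot.
 */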
__initcall_start = .;
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
*(.con_initcall.init)
__con_initcall_end = .;
__x86cpuvendor_start = .;
.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
*(.x86cpuvendor.init)
__x86cpuvendor_end = .;
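/*
 * .parainstructions lists paravirt patch sites; apply_paravirt() rewrites
 * them at boot with code for whichever backend (hypervisor or bare metal)
 * is in use.
 */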
.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
__parainstructions = .;
__parainstructions_end = .;
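/*
 * .altinstructions holds descriptors pairing original instructions with
 * replacements kept in .altinstr_replacement; apply_alternatives() copies
 * a replacement over the original when the CPU supports the feature.
 */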
__alt_instructions = .;
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions_end = .;
.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
*(.altinstr_replacement)
/* .exit.text is discarded at runtime, not link time, to deal with references
   from .altinstructions and .eh_frame */
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
/* vdso blob that is mapped into user space */
.vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
. = ALIGN(PAGE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
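/*
 * With CONFIG_BLK_DEV_INITRD, the section above carries the built-in
 * initramfs cpio archive, which is unpacked into rootfs during boot and,
 * like the rest of the init data, can be freed afterwards.
 */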
. = ALIGN(PAGE_SIZE);
. = ALIGN(PAGE_SIZE);
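/*
 * .data_nosave collects __nosavedata variables, which are not saved or
 * restored across suspend-to-disk.
 */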
.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
. = ALIGN(PAGE_SIZE);
__bss_start = .; /* BSS */
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
/* Sections to be discarded */
/* Build-time check on the image size: */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE")
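/*
 * ld's ASSERT() aborts the link with the given message when the expression
 * evaluates to zero, so an image that outgrows KERNEL_IMAGE_SIZE is caught
 * at build time rather than at boot.
 */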