/* PROVIDE32() provides the traditional lowercase symbols (etext, edata,
   end) only on 32-bit; on 64-bit they are renamed out of the way. */
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
#include <asm/page.h>		/* PAGE_SIZE */
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>		/* L1_CACHE_BYTES */
PHDRS {
	kernel PT_LOAD FLAGS(7); /* RWX */
	notes PT_NOTE FLAGS(0);
	dummy PT_NOTE FLAGS(0);
/* binutils < 2.18 has a bug that makes it misbehave when taking an
   ELF file with all segments at load address 0 as input. This
   happens when running "strip" on vmlinux, because of the AT() magic
   in this linker script. People using GCC >= 4.2 won't run into
   this problem, because the "build-id" support will put some data
   into the "notes" segment (at a non-zero load address).

   To work around this, we force some data into both the "dummy"
   segment and the kernel segment, so the dummy segment will get a
   non-zero load address. It's not enough to always create the
   "notes" segment, since if nothing gets assigned to it, its load
   address will be zero. */
}
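
/* Illustrative sketch of the workaround described above: the .dummy
   output section defined later in SECTIONS carries a word of filler and
   is assigned to both the kernel and dummy program headers, so the dummy
   segment gets a non-zero load address.  The filler value and the phdr
   assignment shown here are assumptions, not necessarily the exact
   contents used:

	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
		LONG(0xf177)
	} :kernel :dummy
 */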
#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
/* 32-bit uses the low-order word of the 64-bit jiffies counter
   (big-endian, hence the 4-byte offset). */
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/* Sections to be discarded. */

/*
 * Text, read only data and other permanent read-only sections
 */
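
/* Each output section below is linked at its kernel virtual address,
   while AT(ADDR(section) - LOAD_OFFSET) gives it a load address (LMA)
   offset below that, so the image loads at the kernel's physical
   address even though symbols resolve to virtual addresses. */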
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text .fixup __ftr_alt_* .ref.text)

#endif /* CONFIG_PPC32 */

	PROVIDE32 (etext = .);
	/* Exception & bug tables */
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}
	/* The dummy segment contents for the bug workaround mentioned
	   above, near PHDRS. */
	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(PAGE_SIZE);
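
	/* Everything placed from here up to the "freed after init ends
	   here" marker further down is reclaimed by free_initmem() once
	   booting has finished. */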
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		__vtop_table_begin = .;
		__vtop_table_end = .;
		__ptov_table_begin = .;
		__ptov_table_end = .;
#ifdef CONFIG_PPC_ISERIES
		__dt_strings_start = .;
		__dt_strings_end = .;
#endif
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}
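
	/* Feature-fixup tables: assembler macros record alternative code
	   sequences and patch sites in these sections; early boot code
	   walks each start/stop-delimited table and rewrites kernel text
	   to match the detected CPU, MMU, lwsync and firmware features. */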
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		*(__ftr_fixup)
		__stop___ftr_fixup = .;
	}

	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		*(__mmu_ftr_fixup)
		__stop___mmu_ftr_fixup = .;
	}

	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		*(__lwsync_fixup)
		__stop___lwsync_fixup = .;
	}

	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		*(__fw_ftr_fixup)
		__stop___fw_ftr_fixup = .;
	}
#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		*(.machine.desc)
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
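	/* A relocatable kernel keeps its dynamic symbol, hash and
	   relocation sections in the image so early boot code can apply
	   the relocations in .rela.dyn (starting at __rela_dyn_start) and
	   run at an address other than the one it was linked at. */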
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
		__rela_dyn_start = .;

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {

	.got : AT(ADDR(.got) - LOAD_OFFSET) {
#endif

	. = ALIGN(PAGE_SIZE);
	PROVIDE32 (edata = .);
	/* The initial task and kernel stack */
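	/* init_task and its stack live here; the stack must be aligned to
	   the kernel stack size so thread_info can be found by masking the
	   stack pointer. */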
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
	}
	. = ALIGN(L1_CACHE_BYTES);
	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}
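
	/* Variables annotated __read_mostly are grouped together below so
	   data that is read often but written rarely does not share cache
	   lines with write-hot data. */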
	. = ALIGN(L1_CACHE_BYTES);
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)
	}
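
	/* Data placed in the nosave section is skipped by hibernation: it
	   is neither saved to nor restored from the suspend image. */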
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);

/*
 * And finally the bss
 */

	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);
}