#include <linux/section-names.h>

#define VMLINUX_SYMBOL(_sym_) _sym_
/* Align . to an 8 byte boundary, equal to the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
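/*
 * Illustrative sketch (not part of the original macros; the driver and
 * function names are hypothetical): code annotated __devinit is compiled
 * into .devinit.text, so DEV_KEEP(init.text) retains it when CONFIG_HOTPLUG
 * is set, while DEV_DISCARD(init.text) lets it be dropped with the other
 * init sections otherwise.
 *
 *	static int __devinit foo_probe(struct pci_dev *pdev,
 *				       const struct pci_device_id *id)
 *	{
 *		return 0;
 *	}
 */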
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)		\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
				*(_ftrace_branch)				\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* trace_printk() format pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
	VMLINUX_SYMBOL(__start___markers) = .;			\
	*(__markers)						\
	VMLINUX_SYMBOL(__stop___markers) = .;			\
	VMLINUX_SYMBOL(__start___tracepoints) = .;		\
	*(__tracepoints)					\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;		\
	/* implement dynamic printk debug */			\
	VMLINUX_SYMBOL(__start___verbose) = .;			\
	*(__verbose)						\
	VMLINUX_SYMBOL(__stop___verbose) = .;			\
#define RO_DATA(align)						\
	. = ALIGN((align));					\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;		\
		*(.rodata) *(.rodata.*)				\
		*(__vermagic)		/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */	\
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early)			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
	/* Built-in firmware blobs */				\
	.builtin_fw       : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .;		\
		*(.builtin_fw)					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;		\
	/* RapidIO route ops */					\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;	\
		*(.rio_route_ops)				\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;	\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
		*(__ksymtab)					\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(__ksymtab_gpl)				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(__ksymtab_unused)				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl)				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future)				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
		*(__kcrctab)					\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(__kcrctab_gpl)				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(__kcrctab_unused)				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl)				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future)				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings)				\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		DEV_KEEP(init.rodata)				\
		DEV_KEEP(exit.rodata)				\
		CPU_KEEP(init.rodata)				\
		CPU_KEEP(exit.rodata)				\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___param) = .;		\
		*(__param)					\
		VMLINUX_SYMBOL(__stop___param) = .;		\
		. = ALIGN((align));				\
		VMLINUX_SYMBOL(__end_rodata) = .;		\
/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA(). */
#define RODATA RO_DATA(4096)
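/*
 * Illustrative usage sketch (assumption, not taken from this file): an
 * architecture's vmlinux.lds.S would typically emit its read-only data
 * with page-sized alignment, e.g.
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * where PAGE_SIZE comes from the architecture's own headers; older scripts
 * simply use the RODATA compatibility macro above.
 */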
#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}
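/*
 * Illustrative sketch (assumption, not part of this file): a built-in LSM
 * registers its init routine with security_initcall(), which places a
 * function pointer in .security_initcall.init; the section above gathers
 * those pointers between __security_initcall_start and
 * __security_initcall_end so they can be invoked during boot.  The
 * function name is hypothetical.
 *
 *	security_initcall(example_lsm_init);
 */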
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
		DEV_KEEP(init.text)				\
		DEV_KEEP(exit.text)				\
		CPU_KEEP(init.text)				\
		CPU_KEEP(exit.text)				\
		MEM_KEEP(init.text)				\
		MEM_KEEP(exit.text)				\
/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__sched_text_start) = .;		\
		*(.sched.text)					\
		VMLINUX_SYMBOL(__sched_text_end) = .;
/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__lock_text_start) = .;		\
		*(.spinlock.text)				\
		VMLINUX_SYMBOL(__lock_text_end) = .;
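/*
 * Illustrative sketch (assumption, not part of this file): scheduler paths
 * annotated with __sched are compiled into .sched.text, and locking
 * primitives annotated with __lockfunc land in .spinlock.text, so the two
 * macros above collect them between their start/end symbols (used, for
 * example, to skip these frames when reporting a task's wait channel).
 * The function name below is hypothetical.
 *
 *	static int __sched example_wait_for_event(struct completion *done);
 */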
#define KPROBES_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)				\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
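/*
 * Illustrative sketch (assumption, not part of this file): functions marked
 * __kprobes are compiled into .kprobes.text; the range between
 * __kprobes_text_start and __kprobes_text_end lets the kprobes core refuse
 * to place probes on that code.  The handler name is hypothetical.
 *
 *	static int __kprobes example_pre_handler(struct kprobe *p,
 *						 struct pt_regs *regs);
 */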
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)				\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
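/*
 * Illustrative sketch (assumption, not part of this file): interrupt entry
 * points annotated with __irq_entry are placed in .irqentry.text, so the
 * function graph tracer can recognise where hard-IRQ handling begins.  The
 * handler name is hypothetical.
 *
 *	void __irq_entry example_do_IRQ(struct pt_regs *regs);
 */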
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(HEAD_TEXT_SECTION)
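/*
 * Illustrative sketch (assumption, not part of this file): an architecture's
 * head.S switches to the head text section before its entry point so that
 * HEAD_TEXT above places it at the very start of the kernel image.  The
 * entry symbol name is illustrative only.
 *
 *	.section HEAD_TEXT_SECTION, "ax"
 *	ENTRY(stext)
 */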
/* init and exit section handling */
#define INIT_DATA						\
	*(.init.data)						\
	DEV_DISCARD(init.data)					\
	DEV_DISCARD(init.rodata)				\
	CPU_DISCARD(init.data)					\
	CPU_DISCARD(init.rodata)				\
	MEM_DISCARD(init.data)					\
	MEM_DISCARD(init.rodata)

#define INIT_TEXT						\
	*(.init.text)						\
	DEV_DISCARD(init.text)					\
	CPU_DISCARD(init.text)					\
	MEM_DISCARD(init.text)

#define EXIT_DATA						\
	*(.exit.data)						\
	DEV_DISCARD(exit.data)					\
	DEV_DISCARD(exit.rodata)				\
	CPU_DISCARD(exit.data)					\
	CPU_DISCARD(exit.rodata)				\
	MEM_DISCARD(exit.data)					\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT						\
	*(.exit.text)						\
	DEV_DISCARD(exit.text)					\
	CPU_DISCARD(exit.text)					\
	MEM_DISCARD(exit.text)
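/*
 * Illustrative sketch (assumption, not part of this file): code annotated
 * __init is compiled into .init.text and data annotated __initdata into
 * .init.data, which INIT_TEXT/INIT_DATA above collect so they can be freed
 * after boot; __exit code lands in .exit.text and is gathered (or discarded
 * for built-in code) by EXIT_TEXT.  The names below are hypothetical.
 *
 *	static int __initdata example_setup_flag;
 *	static int __init example_module_setup(void)
 *	{
 *		return 0;
 *	}
 */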
/* DWARF debug sections.
   Symbols in the DWARF debugging sections are relative to
   the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG						\
	.debug          0 : { *(.debug) }			\
	.line           0 : { *(.line) }			\
	/* GNU DWARF 1 extensions */				\
	.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
	.debug_sfnames  0 : { *(.debug_sfnames) }		\
	/* DWARF 1.1 and DWARF 2 */				\
	.debug_aranges  0 : { *(.debug_aranges) }		\
	.debug_pubnames 0 : { *(.debug_pubnames) }		\
	.debug_info     0 : { *(.debug_info			\
			.gnu.linkonce.wi.*) }			\
	.debug_abbrev   0 : { *(.debug_abbrev) }		\
	.debug_line     0 : { *(.debug_line) }			\
	.debug_frame    0 : { *(.debug_frame) }			\
	.debug_str      0 : { *(.debug_str) }			\
	.debug_loc      0 : { *(.debug_loc) }			\
	.debug_macinfo  0 : { *(.debug_macinfo) }		\
	/* SGI/MIPS DWARF 2 extensions */			\
	.debug_weaknames 0 : { *(.debug_weaknames) }		\
	.debug_funcnames 0 : { *(.debug_funcnames) }		\
	.debug_typenames 0 : { *(.debug_typenames) }		\
	.debug_varnames  0 : { *(.debug_varnames) }		\
/* Stabs debugging sections. */
#define STABS_DEBUG						\
	.stab 0 : { *(.stab) }					\
	.stabstr 0 : { *(.stabstr) }				\
	.stab.excl 0 : { *(.stab.excl) }			\
	.stab.exclstr 0 : { *(.stab.exclstr) }			\
	.stab.index 0 : { *(.stab.index) }			\
	.stab.indexstr 0 : { *(.stab.indexstr) }		\
	.comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;		\
	}
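/*
 * Illustrative sketch (assumption, not part of this file): with
 * CONFIG_GENERIC_BUG, BUG() and WARN_ON() emit a struct bug_entry into the
 * __bug_table section at each call site; the fault path then searches the
 * records between __start___bug_table and __stop___bug_table to report the
 * file and line of the failing assertion.
 *
 *	if (WARN_ON(ptr == NULL))
 *		return -EINVAL;
 */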
#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;		\
		*(.tracedata)					\
		VMLINUX_SYMBOL(__tracedata_end) = .;		\
	}
#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_notes) = .;		\
		*(.note.*)					\
		VMLINUX_SYMBOL(__stop_notes) = .;		\
	}
#define INITCALLS						\
	*(.initcallearly.init)					\
	VMLINUX_SYMBOL(__early_initcall_end) = .;		\
	*(.initcall0.init)					\
	*(.initcall0s.init)					\
	*(.initcall1.init)					\
	*(.initcall1s.init)					\
	*(.initcall2.init)					\
	*(.initcall2s.init)					\
	*(.initcall3.init)					\
	*(.initcall3s.init)					\
	*(.initcall4.init)					\
	*(.initcall4s.init)					\
	*(.initcall5.init)					\
	*(.initcall5s.init)					\
	*(.initcallrootfs.init)					\
	*(.initcall6.init)					\
	*(.initcall6s.init)					\
	*(.initcall7.init)					\
	*(.initcall7s.init)
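/*
 * Illustrative sketch (assumption, not part of this file): built-in
 * initcalls are function pointers placed into .initcall<level>.init by the
 * *_initcall() macros; core_initcall() uses level 1 and device_initcall()
 * (module_init() when built in) uses level 6, so they run in the order the
 * list above collects them.  The function name is hypothetical.
 *
 *	static int __init example_driver_init(void)
 *	{
 *		return 0;
 *	}
 *	device_initcall(example_driver_init);
 */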
/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.  If @vaddr
 * is not blank, it specifies an explicit base address and all percpu
 * symbols will be offset from the given address.  If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * only one valid value, ":NAME", where NAME is the name of a PHDR
 * defined earlier.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
		*(.data.percpu.first)				\
		*(.data.percpu.page_aligned)			\
		*(.data.percpu)					\
		*(.data.percpu.shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
	} phdr							\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
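/*
 * Illustrative usage sketch (assumption, not taken from this file): an
 * architecture that maps the percpu area at virtual address 0 inside a
 * dedicated "percpu" program header might invoke the macro from its
 * vmlinux.lds.S as
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * after declaring the percpu PHDR in its PHDRS block.
 */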
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Align to @align and output the output section for the percpu area.  This
 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for a relocatable x86_32
 * configuration.
 */
#define PERCPU(align)						\
	. = ALIGN(align);					\
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
		*(.data.percpu.first)				\
		*(.data.percpu.page_aligned)			\
		*(.data.percpu)					\
		*(.data.percpu.shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
	}
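/*
 * Illustrative usage sketch (assumption, not taken from this file): most
 * architectures that need no special placement simply emit the percpu
 * area with page alignment from their vmlinux.lds.S:
 *
 *	PERCPU(PAGE_SIZE)
 */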