#include <asm/memory.h>
#include <asm/page.h>

- #define PROC_INFO \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
- VMLINUX_SYMBOL(__proc_info_end) = .;
-
- #define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
-
- #ifdef CONFIG_HOTPLUG_CPU
- #define ARM_CPU_DISCARD(x)
- #define ARM_CPU_KEEP(x) x
- #else
- #define ARM_CPU_DISCARD(x) x
- #define ARM_CPU_KEEP(x)
- #endif
-
- #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG)
- #define ARM_EXIT_KEEP(x) x
- #define ARM_EXIT_DISCARD(x)
- #else
- #define ARM_EXIT_KEEP(x)
- #define ARM_EXIT_DISCARD(x) x
- #endif
+ #include "vmlinux.lds.h"

OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -69,20 +38,9 @@ SECTIONS
* unwind sections get included.
*/
/DISCARD/ : {
- *(.ARM.exidx.exit.text)
- *(.ARM.extab.exit.text)
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
- ARM_EXIT_DISCARD(EXIT_TEXT)
- ARM_EXIT_DISCARD(EXIT_DATA)
- EXIT_CALL
- #ifndef CONFIG_MMU
- *(.text.fixup)
- *(__ex_table)
- #endif
+ ARM_DISCARD
*(.alt.smp.init)
- *(.discard)
- *(.discard.*)
+ *(.pv_table)
}

. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
@@ -95,76 +53,27 @@ SECTIONS

.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- IDMAP_TEXT
- __entry_text_start = .;
- *(.entry.text)
- __entry_text_end = .;
- IRQENTRY_TEXT
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- *(.got) /* Global offset table */
- ARM_CPU_KEEP(PROC_INFO)
+ ARM_TEXT
}

RO_DATA(PAGE_SIZE)

. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- #ifdef CONFIG_MMU
- *(__ex_table)
- #endif
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}

#ifdef CONFIG_ARM_UNWIND
- /*
- * Stack unwinding tables
- */
- . = ALIGN(8);
- .ARM.unwind_idx : {
- __start_unwind_idx = .;
- *(.ARM.exidx*)
- __stop_unwind_idx = .;
- }
- .ARM.unwind_tab : {
- __start_unwind_tab = .;
- *(.ARM.extab*)
- __stop_unwind_tab = .;
- }
+ ARM_UNWIND_SECTIONS
#endif

NOTES

_etext = .; /* End of text and rodata section */

- /*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
- __vectors_start = .;
- .vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
- }
- . = __vectors_start + SIZEOF(.vectors);
- __vectors_end = .;
-
- __stubs_start = .;
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
- }
- . = __stubs_start + SIZEOF(.stubs);
- __stubs_end = .;
-
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
+ ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
@@ -223,6 +132,10 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
#endif

+ #ifdef CONFIG_HAVE_TCM
+ ARM_TCM
+ #endif
+
/*
* End of copied data. We need a dummy section to get its LMA.
* Also located before final ALIGN() as trailing padding is not stored
@@ -234,63 +147,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;

- #ifdef CONFIG_HAVE_TCM
- /*
- * We align everything to a page boundary so we can
- * free it after init has commenced and TCM contents have
- * been copied to its destination.
- */
- .tcm_start : {
- . = ALIGN(PAGE_SIZE);
- __tcm_start = .;
- __itcm_start = .;
- }
-
- /*
- * Link these to the ITCM RAM
- * Put VMA to the TCM address and LMA to the common RAM
- * and we'll upload the contents from RAM to TCM and free
- * the used RAM after that.
- */
- .text_itcm ITCM_OFFSET : AT(__itcm_start)
- {
- __sitcm_text = .;
- *(.tcm.text)
- *(.tcm.rodata)
- . = ALIGN(4);
- __eitcm_text = .;
- }
-
- /*
- * Reset the dot pointer, this is needed to create the
- * relative __dtcm_start below (to be used as extern in code).
- */
- . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
-
- .dtcm_start : {
- __dtcm_start = .;
- }
-
- /* TODO: add remainder of ITCM as well, that can be used for data! */
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
- {
- . = ALIGN(4);
- __sdtcm_data = .;
- *(.tcm.data)
- . = ALIGN(4);
- __edtcm_data = .;
- }
-
- /* Reset the dot pointer or the linker gets confused */
- . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
-
- /* End marker for freeing TCM copy in linked object */
- .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
- . = ALIGN(PAGE_SIZE);
- __tcm_end = .;
- }
- #endif
-
BSS_SECTION(0, 0, 8)
_end = .;
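
For reference, the macros used on the added lines (ARM_DISCARD, ARM_TEXT, ARM_MMU_KEEP, ARM_UNWIND_SECTIONS, ARM_VECTORS, ARM_TCM) come from the shared header pulled in by #include "vmlinux.lds.h", together with the ARM_CPU_* and ARM_EXIT_* helpers that also move there. A minimal sketch of two of them, reconstructed from the lines removed above rather than quoted from the real header, could look like this:

/*
 * Sketch only: reconstructed from the removed lines above; the actual
 * shared vmlinux.lds.h may differ in layout and in the exact section list.
 */
#ifdef CONFIG_MMU
#define ARM_MMU_KEEP(x)    x
#define ARM_MMU_DISCARD(x)
#else
#define ARM_MMU_KEEP(x)
#define ARM_MMU_DISCARD(x) x
#endif

#define ARM_DISCARD \
	*(.ARM.exidx.exit.text) \
	*(.ARM.extab.exit.text) \
	ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
	ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
	ARM_EXIT_DISCARD(EXIT_TEXT) \
	ARM_EXIT_DISCARD(EXIT_DATA) \
	EXIT_CALL \
	ARM_MMU_DISCARD(*(.text.fixup)) \
	ARM_MMU_DISCARD(*(__ex_table)) \
	*(.discard) \
	*(.discard.*)

Collecting the helpers in one header lets the regular and XIP ARM linker scripts share a single set of definitions instead of each carrying its own copy.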