@@ -23,74 +23,6 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 }
 
-/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
-static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
-				   struct codegen_context *ctx, u32 *addrs)
-{
-	const struct bpf_insn *insn = fp->insnsi;
-	bool func_addr_fixed;
-	u64 func_addr;
-	u32 tmp_idx;
-	int i, j, ret;
-
-	for (i = 0; i < fp->len; i++) {
-		/*
-		 * During the extra pass, only the branch target addresses for
-		 * the subprog calls need to be fixed. All other instructions
-		 * can left untouched.
-		 *
-		 * The JITed image length does not change because we already
-		 * ensure that the JITed instruction sequence for these calls
-		 * are of fixed length by padding them with NOPs.
-		 */
-		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
-		    insn[i].src_reg == BPF_PSEUDO_CALL) {
-			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
-						    &func_addr,
-						    &func_addr_fixed);
-			if (ret < 0)
-				return ret;
-
-			/*
-			 * Save ctx->idx as this would currently point to the
-			 * end of the JITed image and set it to the offset of
-			 * the instruction sequence corresponding to the
-			 * subprog call temporarily.
-			 */
-			tmp_idx = ctx->idx;
-			ctx->idx = addrs[i] / 4;
-			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
-			if (ret)
-				return ret;
-
-			/*
-			 * Restore ctx->idx here. This is safe as the length
-			 * of the JITed sequence remains unchanged.
-			 */
-			ctx->idx = tmp_idx;
-		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
-			tmp_idx = ctx->idx;
-			ctx->idx = addrs[i] / 4;
-#ifdef CONFIG_PPC32
-			PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
-			PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
-			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
-				EMIT(PPC_RAW_NOP());
-#else
-			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
-			PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
-			/* overwrite rest with nops */
-			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
-				EMIT(PPC_RAW_NOP());
-#endif
-			ctx->idx = tmp_idx;
-			i++;
-		}
-	}
-
-	return 0;
-}
-
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 {
 	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
@@ -185,7 +117,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
 
 	/* Scouting faux-generate pass 0 */
-	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
+	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
 		/* We hit something illegal or unsupported. */
 		fp = org_fp;
 		goto out_addrs;
@@ -200,7 +132,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	 */
 	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
 		cgctx.idx = 0;
-		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
+		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
 			fp = org_fp;
 			goto out_addrs;
 		}
@@ -234,29 +166,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 skip_init_ctx:
 	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
 
-	if (extra_pass) {
-		/*
-		 * Do not touch the prologue and epilogue as they will remain
-		 * unchanged. Only fix the branch target address for subprog
-		 * calls in the body, and ldimm64 instructions.
-		 *
-		 * This does not change the offsets and lengths of the subprog
-		 * call instruction sequences and hence, the size of the JITed
-		 * image as well.
-		 */
-		bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
-
-		/* There is no need to perform the usual passes. */
-		goto skip_codegen_passes;
-	}
-
 	/* Code generation passes 1-2 */
 	for (pass = 1; pass < 3; pass++) {
 		/* Now build the prologue, body code & epilogue for real. */
 		cgctx.idx = 0;
 		cgctx.alt_exit_addr = 0;
 		bpf_jit_build_prologue(code_base, &cgctx);
-		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
+		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
 			bpf_jit_binary_free(bpf_hdr);
 			fp = org_fp;
 			goto out_addrs;
@@ -268,7 +184,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 			     proglen - (cgctx.idx * 4), cgctx.seen);
 	}
 
-skip_codegen_passes:
 	if (bpf_jit_enable > 1)
 		/*
 		 * Note that we output the base address of the code_base
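
Note on the call-site changes above: bpf_jit_build_body() gains a sixth
argument (false during the early scouting passes, extra_pass during codegen
passes 1-2), and the extra_pass shortcut through bpf_jit_fixup_addresses()
is removed, so an extra pass now re-runs the full codegen passes and updates
subprog call and ldimm64 target addresses inline. The updated declaration
itself is not part of these hunks; a minimal sketch of what it presumably
becomes in arch/powerpc/net/bpf_jit.h, inferred only from the new call sites:

/* Sketch only -- prototype inferred from the call sites, not shown in this diff */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
		       struct codegen_context *ctx, u32 *addrs,
		       int pass, bool extra_pass);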