|
40 | 40 | #include <asm/debugreg.h>
|
41 | 41 | #include <asm/set_memory.h>
|
42 | 42 | #include <asm/sections.h>
|
| 43 | +#include <asm/nospec-branch.h> |
43 | 44 |
|
44 | 45 | #include "common.h"
|
45 | 46 |
|
@@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
|
205 | 206 | }
|
206 | 207 |
|
207 | 208 | /* Check whether insn is indirect jump */
|
208 |
| -static int insn_is_indirect_jump(struct insn *insn) |
| 209 | +static int __insn_is_indirect_jump(struct insn *insn) |
209 | 210 | {
|
210 | 211 | return ((insn->opcode.bytes[0] == 0xff &&
|
211 | 212 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
|
@@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
|
239 | 240 | return (start <= target && target <= start + len);
|
240 | 241 | }
|
241 | 242 |
|
/*
 * Report whether @insn is an indirect jump, including (under retpolines)
 * a direct jump whose target lies inside the indirect-thunk text section.
 */
static int insn_is_indirect_jump(struct insn *insn)
{
	int indirect = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * With CONFIG_RETPOLINE=y, compilers turn indirect jumps into
	 * direct jumps to the __x86_indirect_thunk_* stubs, so treat a
	 * jump landing in that section as indirect too.  Older compilers
	 * may still emit a plain indirect jump even with retpolines
	 * enabled, which is why this supplements — rather than replaces —
	 * the opcode-based check above.
	 */
	if (!indirect)
		indirect = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return indirect;
}
| 262 | + |
242 | 263 | /* Decode whole function to ensure any instructions don't jump into target */
|
243 | 264 | static int can_optimize(unsigned long paddr)
|
244 | 265 | {
|
|
0 commit comments