|
| 1 | +use crate::prelude::*; |
| 2 | + |
| 3 | +use std::fmt::Write; |
| 4 | + |
| 5 | +use rustc_ast::ast::{InlineAsmTemplatePiece, InlineAsmOptions}; |
| 6 | +use rustc_middle::mir::InlineAsmOperand; |
| 7 | +use rustc_target::asm::*; |
| 8 | + |
/// Lower a single MIR inline-asm statement.
///
/// Cranelift cannot embed inline assembly directly, so this instead:
/// 1. assigns every operand a slot in a scratch area (`new_slot`),
/// 2. generates a standalone assembly wrapper function around the user
///    template (`generate_asm_wrapper`) and appends it to `fx.global_asm`,
/// 3. emits an ordinary call to that wrapper, which receives the scratch
///    area (`call_inline_asm`).
///
/// NOTE(review): register-class queries and the wrapper generation are
/// hardcoded to `InlineAsmArch::X86_64`; other architectures currently hit
/// `unimplemented!`/`todo!` paths.
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

    if template.is_empty() {
        // Black box
        return;
    }

    // Running total size of the scratch area; becomes the stack-slot size.
    let mut slot_size = Size::from_bytes(0);
    // (reg, slot): registers whose original value is saved before and
    // restored after the asm runs.
    let mut clobbered_regs = Vec::new();
    // (reg, slot, value): values stored into the slot before the call; the
    // wrapper loads them into `reg` before running the template.
    let mut inputs = Vec::new();
    // (reg, slot, place): registers the wrapper stores into the slot after
    // the template; read back into `place` after the call.
    let mut outputs = Vec::new();

    // Reserve a new, suitably aligned slot in the scratch area for a value
    // of the given register class; returns the slot's byte offset.
    let mut new_slot = |reg_class: InlineAsmRegClass| {
        // Size the slot for the largest type the register class supports.
        let reg_size = reg_class
            .supported_types(InlineAsmArch::X86_64)
            .iter()
            .map(|(ty, _)| ty.size())
            .max()
            .unwrap();
        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
        slot_size = slot_size.align_to(align);
        let offset = slot_size;
        slot_size += reg_size;
        offset
    };

    // FIXME overlap input and output slots to save stack space
    for operand in operands {
        match *operand {
            InlineAsmOperand::In { reg, ref value } => {
                let reg = expect_reg(reg);
                // Note: every operand gets *two* distinct slots — one for the
                // clobber save/restore, one for the value transfer. The order
                // of the `new_slot` calls fixes the scratch-area layout.
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, value).load_scalar(fx)));
            }
            InlineAsmOperand::Out { reg, late: _, place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                // An output whose result is discarded still clobbers `reg`,
                // but needs no transfer slot.
                if let Some(place) = place {
                    outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, place)));
                }
            }
            InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                // `inout` is both read and (optionally) written: an input
                // slot always, an output slot only when the result is used.
                inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, in_value).load_scalar(fx)));
                if let Some(out_place) = out_place {
                    outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, out_place)));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    // NOTE(review): the constant `0` suffix means two asm! invocations in the
    // same function would produce colliding wrapper symbol names — needs a
    // per-function counter.
    let asm_name = format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, /*FIXME*/0);

    let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs);
    fx.global_asm.push_str(&generated_asm);

    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
}
| 78 | + |
| 79 | +fn generate_asm_wrapper( |
| 80 | + asm_name: &str, |
| 81 | + arch: InlineAsmArch, |
| 82 | + options: InlineAsmOptions, |
| 83 | + template: &[InlineAsmTemplatePiece], |
| 84 | + clobbered_regs: Vec<(InlineAsmReg, Size)>, |
| 85 | + inputs: &[(InlineAsmReg, Size, Value)], |
| 86 | + outputs: &[(InlineAsmReg, Size, CPlace<'_>)], |
| 87 | +) -> String { |
| 88 | + let mut generated_asm = String::new(); |
| 89 | + writeln!(generated_asm, ".globl {}", asm_name).unwrap(); |
| 90 | + writeln!(generated_asm, ".type {},@function", asm_name).unwrap(); |
| 91 | + writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap(); |
| 92 | + writeln!(generated_asm, "{}:", asm_name).unwrap(); |
| 93 | + |
| 94 | + generated_asm.push_str(".intel_syntax noprefix\n"); |
| 95 | + generated_asm.push_str(" push rbp\n"); |
| 96 | + generated_asm.push_str(" mov rbp,rdi\n"); |
| 97 | + |
| 98 | + // Save clobbered registers |
| 99 | + if !options.contains(InlineAsmOptions::NORETURN) { |
| 100 | + // FIXME skip registers saved by the calling convention |
| 101 | + for &(reg, offset) in &clobbered_regs { |
| 102 | + save_register(&mut generated_asm, arch, reg, offset); |
| 103 | + } |
| 104 | + } |
| 105 | + |
| 106 | + // Write input registers |
| 107 | + for &(reg, offset, _value) in inputs { |
| 108 | + restore_register(&mut generated_asm, arch, reg, offset); |
| 109 | + } |
| 110 | + |
| 111 | + if options.contains(InlineAsmOptions::ATT_SYNTAX) { |
| 112 | + generated_asm.push_str(".att_syntax\n"); |
| 113 | + } |
| 114 | + |
| 115 | + // The actual inline asm |
| 116 | + for piece in template { |
| 117 | + match piece { |
| 118 | + InlineAsmTemplatePiece::String(s) => { |
| 119 | + generated_asm.push_str(s); |
| 120 | + } |
| 121 | + InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(), |
| 122 | + } |
| 123 | + } |
| 124 | + generated_asm.push('\n'); |
| 125 | + |
| 126 | + if options.contains(InlineAsmOptions::ATT_SYNTAX) { |
| 127 | + generated_asm.push_str(".intel_syntax noprefix\n"); |
| 128 | + } |
| 129 | + |
| 130 | + if !options.contains(InlineAsmOptions::NORETURN) { |
| 131 | + // Read output registers |
| 132 | + for &(reg, offset, _place) in outputs { |
| 133 | + save_register(&mut generated_asm, arch, reg, offset); |
| 134 | + } |
| 135 | + |
| 136 | + // Restore clobbered registers |
| 137 | + for &(reg, offset) in clobbered_regs.iter().rev() { |
| 138 | + restore_register(&mut generated_asm, arch, reg, offset); |
| 139 | + } |
| 140 | + |
| 141 | + generated_asm.push_str(" pop rbp\n"); |
| 142 | + generated_asm.push_str(" ret\n"); |
| 143 | + } else { |
| 144 | + generated_asm.push_str(" ud2\n"); |
| 145 | + } |
| 146 | + |
| 147 | + generated_asm.push_str(".att_syntax\n"); |
| 148 | + writeln!(generated_asm, ".size {name}, .-{name}", name=asm_name).unwrap(); |
| 149 | + generated_asm.push_str(".text\n"); |
| 150 | + generated_asm.push_str("\n\n"); |
| 151 | + |
| 152 | + generated_asm |
| 153 | +} |
| 154 | + |
/// Emit the clif IR that drives one generated asm wrapper: allocate the
/// scratch stack slot, spill the input values into it, call the wrapper with
/// the slot's address, then read the outputs back out into their places.
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(InlineAsmReg, Size, Value)>,
    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
) {
    // Scratch area shared with the wrapper; `slot_size` is the total size
    // accumulated while assigning operand offsets.
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        offset: None,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    #[cfg(debug_assertions)]
    fx.add_comment(stack_slot, "inline asm scratch slot");

    // The wrapper is defined in the global asm text, so import it by name.
    // Its signature: one pointer argument (the scratch area), no returns.
    let inline_asm_func = fx.module.declare_function(asm_name, Linkage::Import, &Signature {
        call_conv: CallConv::SystemV,
        params: vec![AbiParam::new(fx.pointer_type)],
        returns: vec![],
    }).unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    #[cfg(debug_assertions)]
    fx.add_comment(inline_asm_func, asm_name);

    // Store each input value at its assigned offset in the scratch area.
    for (_reg, offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    // Load each output back from its scratch slot and write it to the
    // destination place.
    for (_reg, offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}
| 192 | + |
| 193 | +fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg { |
| 194 | + match reg_or_class { |
| 195 | + InlineAsmRegOrRegClass::Reg(reg) => reg, |
| 196 | + InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class), |
| 197 | + } |
| 198 | +} |
| 199 | + |
| 200 | +fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) { |
| 201 | + match arch { |
| 202 | + InlineAsmArch::X86_64 => { |
| 203 | + write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap(); |
| 204 | + reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap(); |
| 205 | + generated_asm.push('\n'); |
| 206 | + } |
| 207 | + _ => unimplemented!("save_register for {:?}", arch), |
| 208 | + } |
| 209 | +} |
| 210 | + |
| 211 | +fn restore_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) { |
| 212 | + match arch { |
| 213 | + InlineAsmArch::X86_64 => { |
| 214 | + generated_asm.push_str(" mov "); |
| 215 | + reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap(); |
| 216 | + writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap(); |
| 217 | + } |
| 218 | + _ => unimplemented!("restore_register for {:?}", arch), |
| 219 | + } |
| 220 | +} |
0 commit comments