@@ -565,6 +565,64 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
   return NewMI;
 }
 
+static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
+                                    const TargetInstrInfo &TII) {
+  MachineOperand &MO = MI->getOperand(OpNo);
+  const VirtRegInfo &RI = AnalyzeVirtRegInBundle(*MI, MO.getReg());
+
+  // If the machine operand is tied, untie it first.
+  if (MO.isTied()) {
+    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
+    MI->untieRegOperand(OpNo);
+    // Intentional recursion!
+    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
+  }
+
+  // Change the operand from a register to a frame index.
+  MO.ChangeToFrameIndex(FI, MO.getTargetFlags());
+
+  SmallVector<MachineOperand, 4> NewOps;
+  TII.getFrameIndexOperands(NewOps);
+  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
+  MI->insert(MI->operands_begin() + OpNo + 1, NewOps);
+
+  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
+  // is the per-target number of operands that represent the memory operand
+  // excluding this one (MD). This includes MO.
+  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size() + 1);
+  F.setMemConstraint(InlineAsm::ConstraintCode::m);
+  MachineOperand &MD = MI->getOperand(OpNo - 1);
+  MD.setImm(F);
+
+  // Update mayload/maystore metadata.
+  MachineOperand &ExtraMO = MI->getOperand(InlineAsm::MIOp_ExtraInfo);
+  if (RI.Reads)
+    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
+  if (RI.Writes)
+    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
+}
+
+// Returns nullptr if not possible to fold.
+static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
+                                             ArrayRef<unsigned> Ops, int FI,
+                                             const TargetInstrInfo &TII) {
+  assert(MI.isInlineAsm() && "wrong opcode");
+  if (Ops.size() > 1)
+    return nullptr;
+  unsigned Op = Ops[0];
+  assert(Op && "should never be first operand");
+  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");
+
+  if (!MI.mayFoldInlineAsmRegOp(Op))
+    return nullptr;
+
+  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);
+
+  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);
+
+  return &NewMI;
+}
+
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                  ArrayRef<unsigned> Ops, int FI,
                                                  LiveIntervals *LIS,
@@ -612,6 +670,8 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
     if (NewMI)
       MBB->insert(MI, NewMI);
+  } else if (MI.isInlineAsm()) {
+    NewMI = foldInlineAsmMemOperand(MI, Ops, FI, *this);
   } else {
     // Ask the target to do the actual folding.
     NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
@@ -683,6 +743,8 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
     if (NewMI)
       NewMI = &*MBB.insert(MI, NewMI);
+  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
+    NewMI = foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
   } else {
     // Ask the target to do the actual folding.
     NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
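
Taken together, the fold rewrites the INLINEASM operand list in place: the flag immediate preceding the spilled register becomes a MemKind InlineAsm::Flag with constraint `m`, the register operand itself becomes a frame index, and the target-supplied operands from getFrameIndexOperands follow it. A rough before/after sketch in MIR-like notation, assuming a base+offset target that appends a single immediate-0 offset; the operand index, register numbers, and stack slot below are illustrative, not taken from the patch:

  ; Before: operand 3 is a plain register use; its flag is operand 2.
  INLINEASM &"..." [attdialect], $0:[reguse], %1:gpr

  ; After foldInlineAsmMemOperand(&NewMI, /*OpNo=*/3, FI, TII):
  ; [mayload] is set because the virtual register was read in the asm.
  INLINEASM &"..." [mayload] [attdialect], $0:[mem:m], %stack.0, 0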
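The assert(!NewOps.empty()) above implies that any target opting into this fold must override TargetInstrInfo::getFrameIndexOperands to supply the rest of the memory reference; the frame index itself is already in place via ChangeToFrameIndex. A minimal sketch of such an override for a hypothetical target whose stack addressing is base register plus immediate offset (MyTargetInstrInfo and the zero offset are assumptions for illustration, not part of this diff):

  // Append the extra operand(s) a frame-index memory reference needs on
  // this target. The frame index was already placed by
  // foldInlineAsmMemOperand; we only add the immediate 0 byte offset
  // from the slot's base address.
  void MyTargetInstrInfo::getFrameIndexOperands(
      SmallVectorImpl<MachineOperand> &Ops) const {
    Ops.push_back(MachineOperand::CreateImm(0));
  }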