@@ -7559,80 +7559,6 @@ void SIInstrInfo::splitScalar64BitUnaryOp(SIInstrWorklist &Worklist,
7559
7559
addUsersToMoveToVALUWorklist (FullDestReg, MRI, Worklist);
7560
7560
}
7561
7561
7562
// Lower a 64-bit scalar add/sub pseudo (S_ADD_U64_PSEUDO vs. everything
// else, i.e. the matching sub pseudo) into two 32-bit VALU halves: a low
// add/sub that defines a carry, and a high add-with-carry / sub-with-borrow
// that consumes it. The 32-bit results are reassembled with a REG_SEQUENCE,
// every use of the old 64-bit destination is rewritten to the new VGPR
// result, and those users are queued on \p Worklist for VALU conversion.
void SIInstrInfo::splitScalar64BitAddSub(SIInstrWorklist &Worklist,
                                         MachineInstr &Inst,
                                         MachineDominatorTree *MDT) const {
  // Only the add/sub 64-bit pseudos reach here; anything that is not the
  // add pseudo is treated as the sub.
  bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Register class used to carry the per-lane carry/borrow between the two
  // halves (SReg_1_XEXEC — presumably the lane-mask class with EXEC
  // excluded; confirm against the AMDGPU register definitions).
  const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  // New 64-bit VGPR destination plus its two 32-bit halves.
  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  // CarryReg links the low half to the high half; DeadCarryReg receives the
  // (unused) carry-out of the high half.
  Register CarryReg = MRI.createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  // Sub-register classes for extracting 32-bit halves of each source.
  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);

  // Low 32 bits of each source (may be a subreg copy or an immediate).
  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  // High 32 bits of each source.
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  // Low half: 32-bit add/sub that also defines the carry/borrow out.
  unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .add(SrcReg0Sub0)
          .add(SrcReg1Sub0)
          .addImm(0); // clamp bit

  // High half: add/sub with carry-in; its own carry-out is dead.
  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .add(SrcReg0Sub1)
          .add(SrcReg1Sub1)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit

  // Glue the two 32-bit halves back into the 64-bit result.
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  // Rewrite every use of the old 64-bit destination to the new VGPR result.
  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
7636
7562
void SIInstrInfo::splitScalar64BitBinaryOp (SIInstrWorklist &Worklist,
7637
7563
MachineInstr &Inst, unsigned Opcode,
7638
7564
MachineDominatorTree *MDT) const {
0 commit comments