@@ -194,12 +194,14 @@ namespace {
     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                      SourceLocation loc, bool AsValue) const;

-    /// Converts a rvalue to integer value.
-    llvm::Value *convertRValueToInt(RValue RVal) const;
+    llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

-    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
-                                     AggValueSlot ResultSlot,
-                                     SourceLocation Loc, bool AsValue) const;
+    /// Converts an rvalue to integer value if needed.
+    llvm::Value *convertRValueToInt(RValue RVal, bool CastFP = true) const;
+
+    RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
+                                  SourceLocation Loc, bool AsValue,
+                                  bool CastFP = true) const;

     /// Copy an atomic r-value into atomic-layout memory.
     void emitCopyIntoMemory(RValue rvalue) const;
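
The declarations above carry the gist of the change: getScalarRValValueOrNull() factors the existing "scalar value with no padding" check out into a reusable helper, while convertRValueToInt() and the renamed ConvertToValueOrAtomic() gain a CastFP flag that defaults to the old cast-everything-to-integer behaviour. A hypothetical call-site sketch (not part of the patch; names assumed) of what the flag controls:

  // Illustration only: `atomics` is an AtomicInfo, `RVal` a scalar RValue.
  llvm::Value *Legacy = atomics.convertRValueToInt(RVal);                   // always an integer, as before
  llvm::Value *Direct = atomics.convertRValueToInt(RVal, /*CastFP=*/false); // IEEE-like FP may pass through unchanged
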
@@ -261,7 +263,8 @@ namespace {
     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                llvm::AtomicOrdering AO, bool IsVolatile);
     /// Emits atomic load as LLVM instruction.
-    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
+    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
+                                  bool CastFP = true);
     /// Emits atomic compare-and-exchange op as a libcall.
     llvm::Value *EmitAtomicCompareExchangeLibcall(
         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
@@ -1396,12 +1399,13 @@ RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                              LVal.getBaseInfo(), TBAAAccessInfo()));
 }

-RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
-                                             AggValueSlot ResultSlot,
-                                             SourceLocation Loc,
-                                             bool AsValue) const {
+RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
+                                          AggValueSlot ResultSlot,
+                                          SourceLocation Loc, bool AsValue,
+                                          bool CastFP) const {
   // Try not to in some easy cases.
-  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+  assert((Val->getType()->isIntegerTy() || Val->getType()->isIEEELikeFPTy()) &&
+         "Expected integer or floating point value");
   if (getEvaluationKind() == TEK_Scalar &&
       (((!LVal.isBitField() ||
          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
@@ -1410,13 +1414,14 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
     auto *ValTy = AsValue
                       ? CGF.ConvertTypeForMem(ValueTy)
                       : getAtomicAddress().getElementType();
-    if (ValTy->isIntegerTy()) {
-      assert(IntVal->getType() == ValTy && "Different integer types.");
-      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
+    if (ValTy->isIntegerTy() || (!CastFP && ValTy->isIEEELikeFPTy())) {
+      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
+             "Different integer types.");
+      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
     } else if (ValTy->isPointerTy())
-      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
-    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
-      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+      return RValue::get(CGF.Builder.CreateIntToPtr(Val, ValTy));
+    else if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
+      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
   }

   // Create a temporary. This needs to be big enough to hold the
@@ -1433,8 +1438,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,

   // Slam the integer into the temporary.
   Address CastTemp = castToAtomicIntPointer(Temp);
-  CGF.Builder.CreateStore(IntVal, CastTemp)
-      ->setVolatile(TempIsVolatile);
+  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
 }
@@ -1453,9 +1457,11 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
 }

 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
-                                          bool IsVolatile) {
+                                          bool IsVolatile, bool CastFP) {
   // Okay, we're doing this natively.
-  Address Addr = getAtomicAddressAsAtomicIntPointer();
+  Address Addr = getAtomicAddress();
+  if (!(Addr.getElementType()->isIEEELikeFPTy() && !CastFP))
+    Addr = castToAtomicIntPointer(Addr);
   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
   Load->setAtomic(AO);

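
With the new parameter, EmitAtomicLoadOp() only rewrites the address to an atomic integer pointer when the element type is not an IEEE-like floating-point type, or when the caller keeps CastFP == true; otherwise the atomic load is emitted directly on the FP type. A minimal standalone sketch of that decision (the helper name is invented for illustration):

  #include "llvm/IR/Type.h"

  // Sketch only: mirrors the cast decision added to EmitAtomicLoadOp above.
  // Returns true when the address still has to be cast to an integer pointer.
  static bool needsIntPointerCast(llvm::Type *ElemTy, bool CastFP) {
    return !(ElemTy->isIEEELikeFPTy() && !CastFP);
  }
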
@@ -1515,15 +1521,16 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
   }

   // Okay, we're doing this natively.
-  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
+  auto *Load = EmitAtomicLoadOp(AO, IsVolatile, /*CastFP=*/false);

   // If we're ignoring an aggregate return, don't do anything.
   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
     return RValue::getAggregate(Address::invalid(), false);

   // Okay, turn that back into the original value or atomic (for non-simple
   // lvalues) type.
-  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
+  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue,
+                                /*CastFP=*/false);
 }

 /// Emit a load from an l-value of atomic type. Note that the r-value
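
Because EmitAtomicLoad() now passes /*CastFP=*/false on both legs, a plain load through an _Atomic float lvalue no longer has to bounce through an integer. An assumed illustration of the effect (not taken from the patch; exact IR depends on target, ordering and alignment; clang also accepts the C11 _Atomic qualifier in C++ as an extension):

  _Atomic float F;

  float readF() {
    return F;
    // before: %0 = load atomic i32, ptr @F seq_cst, align 4
    //         %1 = bitcast i32 %0 to float
    // after:  %0 = load atomic float, ptr @F seq_cst, align 4
  }
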
@@ -1586,12 +1593,18 @@ Address AtomicInfo::materializeRValue(RValue rvalue) const {
   return TempLV.getAddress(CGF);
 }

-llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
+  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
+    return RVal.getScalarVal();
+  return nullptr;
+}
+
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CastFP) const {
   // If we've got a scalar value of the right size, try to avoid going
-  // through memory.
-  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
-    llvm::Value *Value = RVal.getScalarVal();
-    if (isa<llvm::IntegerType>(Value->getType()))
+  // through memory. Floats get casted if needed by AtomicExpandPass.
+  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
+    if (isa<llvm::IntegerType>(Value->getType()) ||
+        (!CastFP && Value->getType()->isIEEELikeFPTy()))
       return CGF.EmitToMemory(Value, ValueTy);
     else {
       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
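
The new comment spells out the division of labour: the frontend may now hand an IEEE-like FP scalar straight to the atomic instruction, and targets that cannot express FP atomics natively are expected to have LLVM's AtomicExpandPass introduce the integer casts later. A simplified sketch of what the scalar fast path yields (assumed; the ptrtoint and through-memory fallbacks that follow in the real function are omitted, and the helper name is invented):

  #include "llvm/IR/IRBuilder.h"

  // Sketch only: the value handed onward for a padding-free scalar rvalue V.
  static llvm::Value *lowerScalarForAtomic(llvm::IRBuilder<> &B, llvm::Value *V,
                                           llvm::IntegerType *IntTy, bool CastFP) {
    llvm::Type *Ty = V->getType();
    if (Ty->isIntegerTy() || (!CastFP && Ty->isIEEELikeFPTy()))
      return V;                        // integers and (opted-in) FP pass through
    return B.CreateBitCast(V, IntTy);  // legacy behaviour: bitcast FP to iN
  }
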
@@ -1677,8 +1690,8 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                          Failure, IsWeak);
   return std::make_pair(
-      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
-                                SourceLocation(), /*AsValue=*/false),
+      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
+                             SourceLocation(), /*AsValue=*/false),
       Res.second);
 }

@@ -1787,8 +1800,8 @@ void AtomicInfo::EmitAtomicUpdateOp(
       requiresMemSetZero(getAtomicAddress().getElementType())) {
     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
   }
-  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
-                                           SourceLocation(), /*AsValue=*/false);
+  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
+                                        SourceLocation(), /*AsValue=*/false);
   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
   // Try to write new value using cmpxchg operation.
@@ -1953,13 +1966,22 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
   }

   // Okay, we're doing this natively.
-  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
+  llvm::Value *ValToStore =
+      atomics.convertRValueToInt(rvalue, /*CastFP=*/false);

   // Do the atomic store.
-  Address addr = atomics.castToAtomicIntPointer(atomics.getAtomicAddress());
-  intValue = Builder.CreateIntCast(
-      intValue, addr.getElementType(), /*isSigned=*/false);
-  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+  Address Addr = atomics.getAtomicAddress();
+  bool ShouldCastToInt = true;
+  if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
+    if (isa<llvm::IntegerType>(Value->getType()) ||
+        Value->getType()->isIEEELikeFPTy())
+      ShouldCastToInt = false;
+  if (ShouldCastToInt) {
+    Addr = atomics.castToAtomicIntPointer(Addr);
+    ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
+                                       /*isSigned=*/false);
+  }
+  llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

   if (AO == llvm::AtomicOrdering::Acquire)
     AO = llvm::AtomicOrdering::Monotonic;
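
EmitAtomicStore() gets the matching treatment: when the scalar value is an integer or an IEEE-like FP type, both the value and the address keep their types and the store is emitted directly; only the remaining cases still go through castToAtomicIntPointer() plus an integer cast. An assumed illustration for a simple atomic float store (not from the patch; exact IR depends on target and alignment):

  _Atomic float G;

  void writeG(float V) {
    G = V;
    // before: %0 = bitcast float %V to i32
    //         store atomic i32 %0, ptr @G seq_cst, align 4
    // after:  store atomic float %V, ptr @G seq_cst, align 4
  }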