@@ -41,10 +41,11 @@ namespace {
 /// contains enough information to determine where the runs break. Microsoft
 /// and Itanium follow different rules and use different codepaths.
 /// * It is desired that, when possible, bitfields use the appropriate iN type
-///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
+///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
 ///   i24. This isn't always possible because i24 has storage size of 32 bit
-///   and if it is possible to use that extra byte of padding we must use
-///   [i8 x 3] instead of i24. The function clipTailPadding does this.
+///   and if it is possible to use that extra byte of padding we must use [i8 x
+///   3] instead of i24. This is computed when accumulating bitfields in
+///   accumulateBitfields.
 /// C++ examples that require clipping:
 ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
 ///   struct A { int a : 24; ~A(); }; // a must be clipped because:
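
A minimal standalone illustration of the clipping requirement described above (not part of this patch; the layout assumes an Itanium C++ ABI target such as x86-64 Linux): `b` is allocated in the byte that an i32-sized storage unit for `a` would otherwise claim, so `a`'s storage must be the 3-byte array.

#include <cstddef>

struct S {
  int a : 24; // needs [3 x i8] storage in the LLVM type, not i32
  char b;     // placed in the byte immediately after a's 24 bits
};

// Both checks hold under the Itanium layout rules Clang follows on e.g. Linux.
static_assert(offsetof(S, b) == 3, "b reuses the byte i24's padding would cover");
static_assert(sizeof(S) == 4, "no extra padding byte is inserted for a");
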
@@ -62,11 +63,7 @@ namespace {
 /// that the tail padding is not used in the complete class.) However,
 /// because LLVM reads from the complete type it can generate incorrect code
 /// if we do not clip the tail padding off of the bitfield in the complete
-/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
-/// The location of the clip is stored internally as a sentinel of type
-/// SCISSOR. If LLVM were updated to read base types (which it probably
-/// should because locations of things such as VBases are bogus in the llvm
-/// type anyway) then we could eliminate the SCISSOR.
+/// layout.
 /// * Itanium allows nearly empty primary virtual bases. These bases don't get
 ///   get their own storage because they're laid out as part of another base
 ///   or at the beginning of the structure. Determining if a VBase actually
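
The base-versus-complete-object point above can be seen with a hedged standalone example (again not part of the patch, Itanium ABI assumed): a derived class may place its own members in the tail padding of the base's bitfield storage, so the complete-object layout must not widen that storage.

struct A {
  int a : 24; // data size of A is 3: one byte of tail padding remains
  ~A() {}     // non-trivial destructor, so A is not POD for layout purposes
};

struct B : A {
  char b; // the Itanium ABI may place b at offset 3, inside A's tail padding
};

static_assert(sizeof(A) == 4, "A is padded to int alignment");
static_assert(sizeof(B) == 4, "b reuses A's tail padding in the complete object");
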
@@ -200,9 +197,7 @@ struct CGRecordLowering {
                      const CXXRecordDecl *Query) const;
   void calculateZeroInit();
   CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const;
-  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
-  /// padding that is or can potentially be used.
-  void clipTailPadding();
+  void checkBitfieldClipping() const;
   /// Determines if we need a packed llvm struct.
   void determinePacked(bool NVBaseType);
   /// Inserts padding everywhere it's needed.
@@ -305,7 +300,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
   }
   llvm::stable_sort(Members);
   Members.push_back(StorageInfo(Size, getIntNType(8)));
-  clipTailPadding();
+  checkBitfieldClipping();
   determinePacked(NVBaseType);
   insertPadding();
   Members.pop_back();
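
For readers unfamiliar with the sentinel pushed just before these checks: appending a member at offset Size gives every later pass a uniform "next member" to compare against, so the end of the record needs no special casing, and it is popped again before the members are consumed. A generic sketch of that pattern follows, with simplified stand-in types rather than CGRecordLowering's MemberInfo.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Item {
  uint64_t Offset; // in bytes, sorted ascending, non-overlapping
  uint64_t Size;   // in bytes
};

// Collect the gap before each member and before the end of the record. The
// temporary sentinel makes the final gap fall out of the same loop.
std::vector<uint64_t> gaps(std::vector<Item> Items, uint64_t RecordSize) {
  Items.push_back({RecordSize, 0}); // sentinel, mirrors Members.push_back(...)
  std::vector<uint64_t> Result;
  for (std::size_t I = 1; I < Items.size(); ++I)
    Result.push_back(Items[I].Offset -
                     (Items[I - 1].Offset + Items[I - 1].Size));
  Items.pop_back(); // mirrors Members.pop_back()
  return Result;
}
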
@@ -531,6 +526,7 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
   // available padding characters.
   RecordDecl::field_iterator BestEnd = Begin;
   CharUnits BestEndOffset;
+  bool BestClipped; // Whether the representation must be in a byte array.
 
   for (;;) {
     // AtAlignedBoundary is true iff Field is the (potential) start of a new
@@ -593,10 +589,9 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
         // this is the best seen so far.
         BestEnd = Field;
         BestEndOffset = BeginOffset + AccessSize;
-        if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-          // Fine-grained access, so no merging of spans.
-          InstallBest = true;
-        else if (!BitSizeSinceBegin)
+        // Assume clipped until proven not below.
+        BestClipped = true;
+        if (!BitSizeSinceBegin)
           // A zero-sized initial span -- this will install nothing and reset
           // for another.
           InstallBest = true;
@@ -624,6 +619,12 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
         // The access unit is not at a naturally aligned offset within the
         // structure.
         InstallBest = true;
+
+      if (InstallBest && BestEnd == Field)
+        // We're installing the first span, whose clipping was presumed
+        // above. Compute it correctly.
+        if (getSize(Type) == AccessSize)
+          BestClipped = false;
     }
 
     if (!InstallBest) {
@@ -656,11 +657,15 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
           // access unit.
           BestEndOffset = BeginOffset + TypeSize;
           BestEnd = Field;
+          BestClipped = false;
         }
 
         if (Barrier)
           // The next field is a barrier that we cannot merge across.
           InstallBest = true;
+        else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // Fine-grained access, so no merging of spans.
+          InstallBest = true;
         else
           // Otherwise, we're not installing. Update the bit size
           // of the current span to go all the way to LimitOffset, which is
@@ -679,7 +684,17 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
       // Add the storage member for the access unit to the record. The
       // bitfields get the offset of their storage but come afterward and
       // remain there after a stable sort.
-      llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+      llvm::Type *Type;
+      if (BestClipped) {
+        assert(getSize(getIntNType(Context.toBits(AccessSize))) >
+                   AccessSize &&
+               "Clipped access need not be clipped");
+        Type = getByteArrayType(AccessSize);
+      } else {
+        Type = getIntNType(Context.toBits(AccessSize));
+        assert(getSize(Type) == AccessSize &&
+               "Unclipped access must be clipped");
+      }
       Members.push_back(StorageInfo(BeginOffset, Type));
       for (; Begin != BestEnd; ++Begin)
         if (!Begin->isZeroLengthBitField(Context))
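
A hedged sketch of the storage-type decision this hunk implements and its asserts verify, with the DataLayout queries replaced by a simple model (an iN type's allocation size is its store size rounded up to a power of two, as on common targets); the helper names here are stand-ins, not the real CGRecordLowering API.

#include <cstdint>
#include <iostream>
#include <string>

// Modelled allocation size of an iN type: store size rounded up to a power of
// two (an assumption matching typical data layouts, not a DataLayout query).
static uint64_t intAllocSizeInBytes(uint64_t Bits) {
  uint64_t Store = (Bits + 7) / 8;
  uint64_t Alloc = 1;
  while (Alloc < Store)
    Alloc *= 2;
  return Alloc;
}

// Clipped access units use an [N x i8] array so later members can reuse the
// padding; unclipped ones use the iN type directly.
static std::string storageTypeFor(uint64_t AccessSizeBytes) {
  if (intAllocSizeInBytes(AccessSizeBytes * 8) > AccessSizeBytes)
    return "[" + std::to_string(AccessSizeBytes) + " x i8]"; // clipped
  return "i" + std::to_string(AccessSizeBytes * 8);          // unclipped
}

int main() {
  std::cout << storageTypeFor(3) << '\n'; // [3 x i8]: i24 would occupy 4 bytes
  std::cout << storageTypeFor(4) << '\n'; // i32: exactly 4 bytes, no clipping
}
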
@@ -934,32 +949,21 @@ void CGRecordLowering::calculateZeroInit() {
   }
 }
 
-void CGRecordLowering::clipTailPadding() {
-  std::vector<MemberInfo>::iterator Prior = Members.begin();
-  CharUnits Tail = getSize(Prior->Data);
-  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
-                                         MemberEnd = Members.end();
-       Member != MemberEnd; ++Member) {
+// Verify accumulateBitfields computed the correct storage representations.
+void CGRecordLowering::checkBitfieldClipping() const {
+#ifndef NDEBUG
+  auto Tail = CharUnits::Zero();
+  for (const auto &M : Members) {
     // Only members with data and the scissor can cut into tail padding.
-    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
+    if (!M.Data && M.Kind != MemberInfo::Scissor)
       continue;
-    if (Member->Offset < Tail) {
-      assert(Prior->Kind == MemberInfo::Field &&
-             "Only storage fields have tail padding!");
-      if (!Prior->FD || Prior->FD->isBitField())
-        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
-            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
-      else {
-        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
-               "should not have reused this field's tail padding");
-        Prior->Data = getByteArrayType(
-            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
-      }
-    }
-    if (Member->Data)
-      Prior = Member;
-    Tail = Prior->Offset + getSize(Prior->Data);
+
+    assert(M.Offset >= Tail && "Bitfield access unit is not clipped");
+    Tail = M.Offset;
+    if (M.Data)
+      Tail += getSize(M.Data);
   }
+#endif
 }
 
 void CGRecordLowering::determinePacked(bool NVBaseType) {
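
Finally, a standalone sketch of the invariant the new checkBitfieldClipping() asserts, using simplified stand-in types (the real code walks MemberInfo entries and also treats the Scissor marker specially): once members are sorted by offset, each storage unit must start at or after the end of the previous one.

#include <cassert>
#include <cstdint>
#include <vector>

struct Member {
  uint64_t Offset; // in bytes, sorted ascending
  uint64_t Size;   // 0 for members that carry no storage
};

// Walk members in offset order and check that no storage unit overlaps the
// storage that precedes it -- the property clipped bitfield units guarantee.
void checkNoOverlap(const std::vector<Member> &Members) {
  uint64_t Tail = 0; // end of the last storage seen so far
  for (const Member &M : Members) {
    if (M.Size == 0)
      continue; // nothing to overlap
    assert(M.Offset >= Tail && "storage units overlap");
    Tail = M.Offset + M.Size;
  }
}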