@@ -41,10 +41,11 @@ namespace {
 ///   contains enough information to determine where the runs break. Microsoft
 ///   and Itanium follow different rules and use different codepaths.
 /// * It is desired that, when possible, bitfields use the appropriate iN type
-///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
+///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
 ///   i24. This isn't always possible because i24 has storage size of 32 bit
-///   and if it is possible to use that extra byte of padding we must use
-///   [i8 x 3] instead of i24. The function clipTailPadding does this.
+///   and if it is possible to use that extra byte of padding we must use [i8 x
+///   3] instead of i24. This is computed when accumulating bitfields in
+///   accumulateBitfields.
 /// C++ examples that require clipping:
 ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
 ///   struct A { int a : 24; ~A(); }; // a must be clipped because:
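Illustrative sketch (not part of the diff): what the clipping described above means for the comment's own first example. The storage shape shown is the one the comment implies, not compiler output captured here.

  struct S {
    int a : 24; // an i24 access unit would occupy 4 bytes of storage
    char b;     // the record layout places b at offset 3, inside that tail padding
  };
  // Because b reuses the byte at offset 3, the storage for a's access unit is
  // expected to be the 3-byte array [3 x i8] rather than i24; when no other
  // member needs that byte, the wider iN form can be kept.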
@@ -197,9 +198,7 @@ struct CGRecordLowering {
   /// not the primary vbase of some base class.
   bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
   void calculateZeroInit();
-  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
-  /// padding that is or can potentially be used.
-  void clipTailPadding();
+  void checkTailPadding();
   /// Determines if we need a packed llvm struct.
   void determinePacked(bool NVBaseType);
   /// Inserts padding everywhere it's needed.
@@ -302,7 +301,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
   }
   llvm::stable_sort(Members);
   Members.push_back(StorageInfo(Size, getIntNType(8)));
-  clipTailPadding();
+  checkTailPadding();
   determinePacked(NVBaseType);
   insertPadding();
   Members.pop_back();
@@ -521,6 +520,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // available padding characters.
   RecordDecl::field_iterator BestEnd = Begin;
   CharUnits BestEndOffset;
+  bool BestClipped; // Whether the representation must be in a byte array.
 
   for (;;) {
     // AtAlignedBoundary is true iff Field is the (potential) start of a new
@@ -583,10 +583,9 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       // this is the best seen so far.
       BestEnd = Field;
       BestEndOffset = BeginOffset + AccessSize;
-      if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-        // Fine-grained access, so no merging of spans.
-        InstallBest = true;
-      else if (!BitSizeSinceBegin)
+      // Assume clipped until proven not below.
+      BestClipped = true;
+      if (!BitSizeSinceBegin)
         // A zero-sized initial span -- this will install nothing and reset
         // for another.
         InstallBest = true;
@@ -614,6 +613,12 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
           // The access unit is not at a naturally aligned offset within the
           // structure.
           InstallBest = true;
+
+        if (InstallBest && BestEnd == Field)
+          // We're installing the first span, whose clipping was
+          // conservatively presumed above. Compute it correctly.
+          if (getSize(Type) == AccessSize)
+            BestClipped = false;
       }
 
       if (!InstallBest) {
@@ -642,11 +647,15 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
           // access unit.
           BestEndOffset = BeginOffset + TypeSize;
           BestEnd = Field;
+          BestClipped = false;
         }
 
         if (Barrier)
           // The next field is a barrier that we cannot merge across.
           InstallBest = true;
+        else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // Fine-grained access, so no merging of spans.
+          InstallBest = true;
         else
           // Otherwise, we're not installing. Update the bit size
           // of the current span to go all the way to LimitOffset, which is
@@ -665,7 +674,17 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       // Add the storage member for the access unit to the record. The
       // bitfields get the offset of their storage but come afterward and
       // remain there after a stable sort.
-      llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+      llvm::Type *Type;
+      if (BestClipped) {
+        assert(getSize(getIntNType(Context.toBits(AccessSize))) >
+                   AccessSize &&
+               "Clipped access need not be clipped");
+        Type = getByteArrayType(AccessSize);
+      } else {
+        Type = getIntNType(Context.toBits(AccessSize));
+        assert(getSize(Type) == AccessSize &&
+               "Unclipped access must be clipped");
+      }
       Members.push_back(StorageInfo(BeginOffset, Type));
       for (; Begin != BestEnd; ++Begin)
         if (!Begin->isZeroLengthBitField(Context))
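A minimal standalone sketch (assumed helper names, not CGRecordLowering's API) of the rule the hunk above enforces: a clipped access unit gets byte-array storage exactly when the matching iN type would occupy more storage than the access unit itself.

  #include <cassert>
  #include <cstdint>
  #include <string>

  // Hypothetical stand-in for getSize(getIntNType(bits)): assumes the storage
  // of an iN type is rounded up to the next power-of-two byte count, so that
  // i24 occupies 4 bytes (matching the comment at the top of the file).
  static uint64_t intNStorageBytes(uint64_t bits) {
    uint64_t bytes = (bits + 7) / 8;
    uint64_t p = 1;
    while (p < bytes)
      p *= 2;
    return p;
  }

  // Mirrors the BestClipped branch: byte array when clipped, iN otherwise.
  static std::string storageFor(uint64_t accessBytes, bool clipped) {
    if (clipped) {
      assert(intNStorageBytes(accessBytes * 8) > accessBytes &&
             "clipped access need not be clipped");
      return "[" + std::to_string(accessBytes) + " x i8]";
    }
    assert(intNStorageBytes(accessBytes * 8) == accessBytes &&
           "unclipped access must be clipped");
    return "i" + std::to_string(accessBytes * 8);
  }

  // storageFor(3, /*clipped=*/true)  -> "[3 x i8]"
  // storageFor(4, /*clipped=*/false) -> "i32"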
@@ -911,7 +930,9 @@ void CGRecordLowering::calculateZeroInit() {
   }
 }
 
-void CGRecordLowering::clipTailPadding() {
+// Verify accumulateBitfields computed the correct storage representations.
+void CGRecordLowering::checkTailPadding() {
+#ifndef NDEBUG
   std::vector<MemberInfo>::iterator Prior = Members.begin();
   CharUnits Tail = getSize(Prior->Data);
   for (std::vector<MemberInfo>::iterator Member = Prior + 1,
@@ -920,23 +941,13 @@ void CGRecordLowering::clipTailPadding() {
     // Only members with data and the scissor can cut into tail padding.
     if (!Member->Data && Member->Kind != MemberInfo::Scissor)
       continue;
-    if (Member->Offset < Tail) {
-      assert(Prior->Kind == MemberInfo::Field &&
-             "Only storage fields have tail padding!");
-      if (!Prior->FD || Prior->FD->isBitField())
-        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
-            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
-      else {
-        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
-               "should not have reused this field's tail padding");
-        Prior->Data = getByteArrayType(
-            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
-      }
-    }
+
+    assert(Member->Offset >= Tail && "bitfield not already clipped");
     if (Member->Data)
       Prior = Member;
     Tail = Prior->Offset + getSize(Prior->Data);
   }
+#endif
 }
 
 void CGRecordLowering::determinePacked(bool NVBaseType) {
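For intuition, a minimal standalone sketch of the invariant checkTailPadding now asserts, using plain (offset, size) byte pairs instead of MemberInfo; all names here are illustrative.

  #include <cassert>
  #include <cstddef>
  #include <utility>
  #include <vector>

  // Members that carry data, in layout order, as (offset, storage size) in
  // bytes. Mirroring the loop above: every member must start at or after the
  // end of the previous member's storage, because accumulateBitFields is now
  // expected to have clipped bitfield storage already.
  static void checkNoTailOverlap(
      const std::vector<std::pair<std::size_t, std::size_t>> &Members) {
    std::size_t Tail = 0;
    for (const auto &M : Members) {
      assert(M.first >= Tail && "bitfield not already clipped");
      Tail = M.first + M.second;
    }
  }

  // Example: struct { int a : 24; char b; } with clipped bitfield storage
  // lowers to members {0, 3} ([3 x i8]) and {3, 1} (i8); the check passes.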