
Commit 4d9861a

Move bitfield clipping into bitfield accumulation
1 parent 49839f9 commit 4d9861a

Showing 2 changed files with 55 additions and 26 deletions.


clang/lib/CodeGen/CGRecordLayoutBuilder.cpp

Lines changed: 37 additions & 26 deletions
@@ -41,10 +41,11 @@ namespace {
 /// contains enough information to determine where the runs break. Microsoft
 /// and Itanium follow different rules and use different codepaths.
 /// * It is desired that, when possible, bitfields use the appropriate iN type
-///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
+///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
 ///   i24. This isn't always possible because i24 has storage size of 32 bit
-///   and if it is possible to use that extra byte of padding we must use
-///   [i8 x 3] instead of i24. The function clipTailPadding does this.
+///   and if it is possible to use that extra byte of padding we must use [i8 x
+///   3] instead of i24. This is computed when accumulating bitfields in
+///   accumulateBitfields.
 /// C++ examples that require clipping:
 ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
 ///   struct A { int a : 24; ~A(); }; // a must be clipped because:
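
Not part of the commit, but useful context for the comment above: a minimal C illustration of when a 24-bit span keeps an integer representation versus when it must be clipped to a byte array. The lowered types noted in the comments are typical Itanium-ABI results, not output quoted from this patch.

struct NoReuse {
  unsigned x : 24; /* nothing follows in the tail byte, so the access unit */
};                 /* can stay an integer type (i24, storage size 4 bytes) */

struct Reused {
  int a : 24;      /* 'b' is placed at offset 3, inside i24's tail padding, */
  char b;          /* so 'a' must be stored as [3 x i8] to leave that byte  */
};                 /* free for 'b'                                          */
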
@@ -197,9 +198,7 @@ struct CGRecordLowering {
   /// not the primary vbase of some base class.
   bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
   void calculateZeroInit();
-  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
-  /// padding that is or can potentially be used.
-  void clipTailPadding();
+  void checkTailPadding();
   /// Determines if we need a packed llvm struct.
   void determinePacked(bool NVBaseType);
   /// Inserts padding everywhere it's needed.
@@ -302,7 +301,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
   }
   llvm::stable_sort(Members);
   Members.push_back(StorageInfo(Size, getIntNType(8)));
-  clipTailPadding();
+  checkTailPadding();
   determinePacked(NVBaseType);
   insertPadding();
   Members.pop_back();
@@ -521,6 +520,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // available padding characters.
   RecordDecl::field_iterator BestEnd = Begin;
   CharUnits BestEndOffset;
+  bool BestClipped; // Whether the representation must be in a byte array.

   for (;;) {
     // AtAlignedBoundary is true iff Field is the (potential) start of a new
@@ -583,10 +583,9 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         // this is the best seen so far.
         BestEnd = Field;
         BestEndOffset = BeginOffset + AccessSize;
-        if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
-          // Fine-grained access, so no merging of spans.
-          InstallBest = true;
-        else if (!BitSizeSinceBegin)
+        // Assume clipped until proven not below.
+        BestClipped = true;
+        if (!BitSizeSinceBegin)
          // A zero-sized initial span -- this will install nothing and reset
          // for another.
          InstallBest = true;
@@ -614,6 +613,12 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
        // The access unit is not at a naturally aligned offset within the
        // structure.
        InstallBest = true;
+
+      if (InstallBest && BestEnd == Field)
+        // We're installing the first span, whose clipping was
+        // conservatively presumed above. Compute it correctly.
+        if (getSize(Type) == AccessSize)
+          BestClipped = false;
     }

     if (!InstallBest) {
@@ -642,11 +647,15 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
        // access unit.
        BestEndOffset = BeginOffset + TypeSize;
        BestEnd = Field;
+        BestClipped = false;
      }

      if (Barrier)
        // The next field is a barrier that we cannot merge across.
        InstallBest = true;
+      else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+        // Fine-grained access, so no merging of spans.
+        InstallBest = true;
      else
        // Otherwise, we're not installing. Update the bit size
        // of the current span to go all the way to LimitOffset, which is
@@ -665,7 +674,17 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     // Add the storage member for the access unit to the record. The
     // bitfields get the offset of their storage but come afterward and
     // remain there after a stable sort.
-    llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+    llvm::Type *Type;
+    if (BestClipped) {
+      assert(getSize(getIntNType(Context.toBits(AccessSize))) >
+                 AccessSize &&
+             "Clipped access need not be clipped");
+      Type = getByteArrayType(AccessSize);
+    } else {
+      Type = getIntNType(Context.toBits(AccessSize));
+      assert(getSize(Type) == AccessSize &&
+             "Unclipped access must be clipped");
+    }
     Members.push_back(StorageInfo(BeginOffset, Type));
     for (; Begin != BestEnd; ++Begin)
       if (!Begin->isZeroLengthBitField(Context))
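
A standalone sketch (not clang's code; the helper name and the power-of-two rounding rule are assumptions) of the decision the new branch above encodes: a best span is "clipped" to a byte array exactly when the natural iN type for its access size would occupy more storage than the span itself.

#include <cassert>
#include <cstdio>
#include <string>

// Hypothetical model: pick a storage representation for an access unit of
// `accessChars` bytes. Integer types are assumed to round their in-memory
// size up to the next power of two, as on common targets.
static std::string storageFor(unsigned accessChars) {
  unsigned intStorageChars = 1;
  while (intStorageChars < accessChars)
    intStorageChars *= 2;

  if (intStorageChars > accessChars)
    // Clipped: the iN type would spill into tail padding that later
    // members may reuse, so use a byte array of exactly accessChars.
    return "[" + std::to_string(accessChars) + " x i8]";

  assert(intStorageChars == accessChars && "unclipped unit must fit exactly");
  return "i" + std::to_string(accessChars * 8); // unclipped integer unit
}

int main() {
  std::printf("%s\n", storageFor(3).c_str()); // prints: [3 x i8]
  std::printf("%s\n", storageFor(4).c_str()); // prints: i32
}
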
@@ -911,7 +930,9 @@ void CGRecordLowering::calculateZeroInit() {
   }
 }

-void CGRecordLowering::clipTailPadding() {
+// Verify accumulateBitfields computed the correct storage representations.
+void CGRecordLowering::checkTailPadding() {
+#ifndef NDEBUG
   std::vector<MemberInfo>::iterator Prior = Members.begin();
   CharUnits Tail = getSize(Prior->Data);
   for (std::vector<MemberInfo>::iterator Member = Prior + 1,
@@ -920,23 +941,13 @@ void CGRecordLowering::clipTailPadding() {
     // Only members with data and the scissor can cut into tail padding.
     if (!Member->Data && Member->Kind != MemberInfo::Scissor)
       continue;
-    if (Member->Offset < Tail) {
-      assert(Prior->Kind == MemberInfo::Field &&
-             "Only storage fields have tail padding!");
-      if (!Prior->FD || Prior->FD->isBitField())
-        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
-            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
-      else {
-        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
-               "should not have reused this field's tail padding");
-        Prior->Data = getByteArrayType(
-            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
-      }
-    }
+
+    assert(Member->Offset >= Tail && "bitfield not already clipped");
     if (Member->Data)
       Prior = Member;
     Tail = Prior->Offset + getSize(Prior->Data);
   }
+#endif
 }

 void CGRecordLowering::determinePacked(bool NVBaseType) {
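
The old clipping pass is thus reduced to a pure consistency check that disappears in NDEBUG builds. A generic sketch of that idiom (not clang code; the member representation below is invented for illustration):

#include <cassert>
#include <utility>
#include <vector>

// Walk members already laid out in offset order and assert that none starts
// before the previous one's storage ends; compiles to nothing with -DNDEBUG.
static void checkNoTailPaddingReuse(
    const std::vector<std::pair<unsigned, unsigned>> &members /* offset, size */) {
#ifndef NDEBUG
  unsigned tail = 0;
  for (const auto &m : members) {
    assert(m.first >= tail && "member overlaps prior storage");
    tail = m.first + m.second;
  }
#else
  (void)members; // no verification in release builds
#endif
}

int main() {
  checkNoTailPaddingReuse({{0, 3}, {3, 1}, {4, 4}}); // ok: no overlap
}
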

clang/test/CodeGen/bitfield-access-unit.c

Lines changed: 18 additions & 0 deletions
@@ -222,6 +222,24 @@ struct G {
222222
// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
223223
// CHECK-NEXT: ]>
224224

225+
struct __attribute__((aligned(8))) H {
226+
char a;
227+
unsigned b : 24; // on expensive alignment we want this to stay 24
228+
unsigned c __attribute__((aligned(8))); // Think 'long long' or lp64 ptr
229+
} h;
230+
// CHECK-LABEL: LLVMType:%struct.H =
231+
// LAYOUT-FLEX-SAME: type <{ i8, i32, [3 x i8], i32, [4 x i8] }>
232+
// LAYOUT-STRICT-SAME: type { i8, [3 x i8], [4 x i8], i32, [4 x i8] }
233+
// LAYOUT-DWN32-FLEX-SAME: type <{ i8, i32, [3 x i8], i32, [4 x i8] }>
234+
// LAYOUT-DWN32-STRICT-SAME: type { i8, [3 x i8], [4 x i8], i32, [4 x i8] }
235+
// CHECK: BitFields:[
236+
// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:32 StorageOffset:1
237+
// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:24 StorageOffset:1
238+
239+
// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:32 StorageOffset:1
240+
// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:24 StorageOffset:1
241+
// CHECK-NEXT: ]>
242+
225243
#if _LP64
226244
struct A64 {
227245
int a : 16;
