Skip to content

Commit 53e9064

Browse files
committed
IRGen: Use the header size for the initial offset in non-fixed heap layouts
When the first element in the heap layout was non-fixed we would use the minimum size of the total heap layout for the initial offset. This would create unnecessarily large heap layouts. rdar://61716736
1 parent 240a86f commit 53e9064

File tree

4 files changed

+44
-2
lines changed

4 files changed

+44
-2
lines changed

lib/IRGen/GenHeap.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,7 @@ static llvm::Value *calcInitOffset(swift::irgen::IRGenFunction &IGF,
285285
const swift::irgen::HeapLayout &layout) {
286286
llvm::Value *offset = nullptr;
287287
if (i == 0) {
288-
auto startoffset = layout.getSize();
288+
auto startoffset = layout.getHeaderSize();
289289
offset = llvm::ConstantInt::get(IGF.IGM.SizeTy, startoffset.getValue());
290290
return offset;
291291
}

lib/IRGen/StructLayout.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ StructLayout::StructLayout(IRGenModule &IGM,
7070
assert(!builder.empty() == requiresHeapHeader(layoutKind));
7171
MinimumAlign = Alignment(1);
7272
MinimumSize = Size(0);
73+
headerSize = builder.getHeaderSize();
7374
SpareBits.clear();
7475
IsFixedLayout = true;
7576
IsKnownPOD = IsPOD;
@@ -79,6 +80,7 @@ StructLayout::StructLayout(IRGenModule &IGM,
7980
} else {
8081
MinimumAlign = builder.getAlignment();
8182
MinimumSize = builder.getSize();
83+
headerSize = builder.getHeaderSize();
8284
SpareBits = builder.getSpareBits();
8385
IsFixedLayout = builder.isFixedLayout();
8486
IsKnownPOD = builder.isPOD();
@@ -186,13 +188,15 @@ void StructLayoutBuilder::addHeapHeader() {
186188
CurSize = IGM.RefCountedStructSize;
187189
CurAlignment = IGM.getPointerAlignment();
188190
StructFields.push_back(IGM.RefCountedStructTy);
191+
headerSize = CurSize;
189192
}
190193

191194
void StructLayoutBuilder::addNSObjectHeader() {
192195
assert(StructFields.empty() && "adding heap header at a non-zero offset");
193196
CurSize = IGM.getPointerSize();
194197
CurAlignment = IGM.getPointerAlignment();
195198
StructFields.push_back(IGM.ObjCClassPtrTy);
199+
headerSize = CurSize;
196200
}
197201

198202
bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,

lib/IRGen/StructLayout.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -253,6 +253,7 @@ class StructLayoutBuilder {
253253
IRGenModule &IGM;
254254
SmallVector<llvm::Type*, 8> StructFields;
255255
Size CurSize = Size(0);
256+
Size headerSize = Size(0);
256257
private:
257258
Alignment CurAlignment = Alignment(1);
258259
SmallVector<SpareBitVector, 8> CurSpareBits;
@@ -314,6 +315,9 @@ class StructLayoutBuilder {
314315
/// Return the size of the structure built so far.
315316
Size getSize() const { return CurSize; }
316317

318+
// Return the size of the header.
319+
Size getHeaderSize() const { return headerSize; }
320+
317321
/// Return the alignment of the structure built so far.
318322
Alignment getAlignment() const { return CurAlignment; }
319323

@@ -350,6 +354,9 @@ class StructLayout {
350354

351355
/// The statically-known minimum bound on the size.
352356
Size MinimumSize;
357+
358+
/// The size of a header if present.
359+
Size headerSize;
353360

354361
/// The statically-known spare bit mask.
355362
SpareBitVector SpareBits;
@@ -386,6 +393,7 @@ class StructLayout {
386393
ArrayRef<ElementLayout> elements)
387394
: MinimumAlign(builder.getAlignment()),
388395
MinimumSize(builder.getSize()),
396+
headerSize(builder.getHeaderSize()),
389397
SpareBits(builder.getSpareBits()),
390398
IsFixedLayout(builder.isFixedLayout()),
391399
IsKnownPOD(builder.isPOD()),
@@ -401,6 +409,7 @@ class StructLayout {
401409

402410
llvm::Type *getType() const { return Ty; }
403411
Size getSize() const { return MinimumSize; }
412+
Size getHeaderSize() const { return headerSize; }
404413
Alignment getAlignment() const { return MinimumAlign; }
405414
const SpareBitVector &getSpareBits() const { return SpareBits; }
406415
SpareBitVector &getSpareBits() { return SpareBits; }

test/IRGen/partial_apply.sil

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,12 @@
1-
// RUN: %target-swift-frontend %s -emit-ir | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize
1+
// RUN: %empty-directory(%t)
2+
// RUN: %target-swift-frontend -emit-module -enable-library-evolution -emit-module-path=%t/resilient_struct.swiftmodule -module-name=resilient_struct %S/../Inputs/resilient_struct.swift
3+
// RUN: %target-swift-frontend -I %t -emit-ir %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize
24

35
// REQUIRES: CPU=x86_64
46

57
import Builtin
68
import Swift
9+
import resilient_struct
710

811
class SwiftClass {}
912
sil_vtable SwiftClass {}
@@ -738,3 +741,29 @@ bb0(%x : $*SwiftClassPair):
738741
%t = tuple()
739742
return %t : $()
740743
}
744+
745+
sil public_external @closure : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()
746+
747+
// Make sure that we use the heap header size (16) for the initial offset.
748+
// CHECK: define swiftcc void @test_initial_offset(%swift.opaque* noalias nocapture %0, %T13partial_apply10SwiftClassC* %1)
749+
// CHECK: [[T0:%.*]] = call swiftcc %swift.metadata_response @"$s16resilient_struct12ResilientIntVMa"
750+
// CHECK: [[MD:%.*]] = extractvalue %swift.metadata_response [[T0]], 0
751+
// CHECK: [[CAST:%.*]] = bitcast %swift.type* [[MD]] to i8***
752+
// CHECK: [[VWT_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CAST]], i64 -1
753+
// CHECK: [[VWT_TMP:%.*]] = load i8**, i8*** [[VWT_PTR]]
754+
// CHECK: [[VWT:%.*]] = bitcast i8** [[VWT_TMP]] to %swift.vwtable*
755+
// CHECK: [[FLAGS_PTR:%.*]] = getelementptr inbounds %swift.vwtable, %swift.vwtable* [[VWT]], i32 0, i32 10
756+
// CHECK: [[FLAGS:%.*]] = load i32, i32* [[FLAGS_PTR]]
757+
// CHECK: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
758+
// CHECK: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
759+
// CHECK: = xor i64 [[ALIGNMASK]], -1
760+
// CHECK: = add i64 16, [[ALIGNMASK]]
761+
762+
sil @test_initial_offset : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> () {
763+
bb0(%x : $*ResilientInt, %y : $SwiftClass):
764+
%f = function_ref @closure : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()
765+
%p = partial_apply [callee_guaranteed] %f(%x, %y) : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()
766+
release_value %p : $@callee_guaranteed () ->()
767+
%t = tuple()
768+
return %t : $()
769+
}

0 commit comments

Comments
 (0)