Skip to content

IRGen: Use the header size for the initial offset in non-fixed heap layouts #31049

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/IRGen/GenHeap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ static llvm::Value *calcInitOffset(swift::irgen::IRGenFunction &IGF,
const swift::irgen::HeapLayout &layout) {
llvm::Value *offset = nullptr;
if (i == 0) {
auto startoffset = layout.getSize();
auto startoffset = layout.getHeaderSize();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you rename this to startOffset?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will change in a follow-up.

offset = llvm::ConstantInt::get(IGF.IGM.SizeTy, startoffset.getValue());
return offset;
}
Expand Down
4 changes: 4 additions & 0 deletions lib/IRGen/StructLayout.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ StructLayout::StructLayout(IRGenModule &IGM,
assert(!builder.empty() == requiresHeapHeader(layoutKind));
MinimumAlign = Alignment(1);
MinimumSize = Size(0);
headerSize = builder.getHeaderSize();
SpareBits.clear();
IsFixedLayout = true;
IsKnownPOD = IsPOD;
Expand All @@ -79,6 +80,7 @@ StructLayout::StructLayout(IRGenModule &IGM,
} else {
MinimumAlign = builder.getAlignment();
MinimumSize = builder.getSize();
headerSize = builder.getHeaderSize();
SpareBits = builder.getSpareBits();
IsFixedLayout = builder.isFixedLayout();
IsKnownPOD = builder.isPOD();
Expand Down Expand Up @@ -186,13 +188,15 @@ void StructLayoutBuilder::addHeapHeader() {
CurSize = IGM.RefCountedStructSize;
CurAlignment = IGM.getPointerAlignment();
StructFields.push_back(IGM.RefCountedStructTy);
headerSize = CurSize;
}

/// Begin the layout with an Objective-C object header: a single
/// class pointer ("isa") occupying offset zero.
void StructLayoutBuilder::addNSObjectHeader() {
// A header is only valid at offset zero, before any fields are added.
assert(StructFields.empty() && "adding heap header at a non-zero offset");
CurSize = IGM.getPointerSize();
CurAlignment = IGM.getPointerAlignment();
StructFields.push_back(IGM.ObjCClassPtrTy);
// Record the header size so that non-fixed layouts can start computing
// field offsets after the header (see calcInitOffset in GenHeap.cpp).
headerSize = CurSize;
}

bool StructLayoutBuilder::addFields(llvm::MutableArrayRef<ElementLayout> elts,
Expand Down
9 changes: 9 additions & 0 deletions lib/IRGen/StructLayout.h
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,7 @@ class StructLayoutBuilder {
IRGenModule &IGM;
SmallVector<llvm::Type*, 8> StructFields;
Size CurSize = Size(0);
Size headerSize = Size(0);
private:
Alignment CurAlignment = Alignment(1);
SmallVector<SpareBitVector, 8> CurSpareBits;
Expand Down Expand Up @@ -314,6 +315,9 @@ class StructLayoutBuilder {
/// Return the size of the structure built so far.
Size getSize() const { return CurSize; }

/// Return the size of the heap/NSObject header added so far,
/// or zero if no header has been added.
Size getHeaderSize() const { return headerSize; }

/// Return the alignment of the structure built so far.
Alignment getAlignment() const { return CurAlignment; }

Expand Down Expand Up @@ -350,6 +354,9 @@ class StructLayout {

/// The statically-known minimum bound on the size.
Size MinimumSize;

/// The size of a header if present.
Size headerSize;

/// The statically-known spare bit mask.
SpareBitVector SpareBits;
Expand Down Expand Up @@ -386,6 +393,7 @@ class StructLayout {
ArrayRef<ElementLayout> elements)
: MinimumAlign(builder.getAlignment()),
MinimumSize(builder.getSize()),
headerSize(builder.getHeaderSize()),
SpareBits(builder.getSpareBits()),
IsFixedLayout(builder.isFixedLayout()),
IsKnownPOD(builder.isPOD()),
Expand All @@ -401,6 +409,7 @@ class StructLayout {

/// Return the LLVM struct type for this layout.
llvm::Type *getType() const { return Ty; }
/// Return the statically-known minimum bound on the size.
Size getSize() const { return MinimumSize; }
/// Return the size of the header if present, or zero otherwise.
Size getHeaderSize() const { return headerSize; }
/// Return the statically-known minimum bound on the alignment.
Alignment getAlignment() const { return MinimumAlign; }
const SpareBitVector &getSpareBits() const { return SpareBits; }
SpareBitVector &getSpareBits() { return SpareBits; }
Expand Down
31 changes: 30 additions & 1 deletion test/IRGen/partial_apply.sil
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
// RUN: %target-swift-frontend %s -emit-ir | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize
// RUN: %empty-directory(%t)
// RUN: %target-swift-frontend -emit-module -enable-library-evolution -emit-module-path=%t/resilient_struct.swiftmodule -module-name=resilient_struct %S/../Inputs/resilient_struct.swift
// RUN: %target-swift-frontend -I %t -emit-ir %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize

// REQUIRES: CPU=x86_64

import Builtin
import Swift
import resilient_struct

class SwiftClass {}
sil_vtable SwiftClass {}
Expand Down Expand Up @@ -738,3 +741,29 @@ bb0(%x : $*SwiftClassPair):
%t = tuple()
return %t : $()
}

sil public_external @closure : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()

// Make sure that we use the heap header size (16) for the initial offset.
// CHECK-LABEL: define{{.*}} swiftcc void @test_initial_offset(%swift.opaque* noalias nocapture %0, %T13partial_apply10SwiftClassC* %1)
// CHECK: [[T0:%.*]] = call swiftcc %swift.metadata_response @"$s16resilient_struct12ResilientIntVMa"
// CHECK: [[MD:%.*]] = extractvalue %swift.metadata_response [[T0]], 0
// CHECK: [[CAST:%.*]] = bitcast %swift.type* [[MD]] to i8***
// CHECK: [[VWT_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CAST]], i64 -1
// CHECK: [[VWT_TMP:%.*]] = load i8**, i8*** [[VWT_PTR]]
// CHECK: [[VWT:%.*]] = bitcast i8** [[VWT_TMP]] to %swift.vwtable*
// CHECK: [[FLAGS_PTR:%.*]] = getelementptr inbounds %swift.vwtable, %swift.vwtable* [[VWT]], i32 0, i32 10
// CHECK: [[FLAGS:%.*]] = load i32, i32* [[FLAGS_PTR]]
// CHECK: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
// CHECK: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
// CHECK: = xor i64 [[ALIGNMASK]], -1
// CHECK: = add i64 16, [[ALIGNMASK]]

// Partially apply a closure over a resiliently-sized value. The resulting
// context object is a non-fixed heap layout, so IRGen must compute the
// dynamic field offset starting from the heap header size (16 bytes on
// x86_64), as verified by the CHECK lines above.
sil @test_initial_offset : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> () {
bb0(%x : $*ResilientInt, %y : $SwiftClass):
%f = function_ref @closure : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()
%p = partial_apply [callee_guaranteed] %f(%x, %y) : $@convention(thin) (@in_guaranteed ResilientInt, @guaranteed SwiftClass) -> ()
release_value %p : $@callee_guaranteed () ->()
%t = tuple()
return %t : $()
}