Skip to content

[5.3] IRGen: Correctly compute the offset for a non-fixed field after… #31924

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/IRGen/GenHeap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ static llvm::Value *calcInitOffset(swift::irgen::IRGenFunction &IGF,
auto &prevElt = layout.getElement(i - 1);
auto prevType = layout.getElementTypes()[i - 1];
// Start calculating offsets from the last fixed-offset field.
Size lastFixedOffset = layout.getElement(i - 1).getByteOffset();
Size lastFixedOffset = layout.getElement(i - 1).getByteOffsetDuringLayout();
if (auto *fixedType = dyn_cast<FixedTypeInfo>(&prevElt.getType())) {
// If the last fixed-offset field is also fixed-size, we can
// statically compute the end of the fixed-offset fields.
Expand Down
3 changes: 2 additions & 1 deletion lib/IRGen/StructLayout.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,8 @@ void StructLayoutBuilder::addNonFixedSizeElement(ElementLayout &elt) {

/// Add an empty element to the aggregate.
void StructLayoutBuilder::addEmptyElement(ElementLayout &elt) {
elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal));
auto byteOffset = isFixedLayout() ? CurSize : Size(0);
elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal), byteOffset);
}

/// Add an element at the fixed offset of the current end of the
Expand Down
26 changes: 25 additions & 1 deletion lib/IRGen/StructLayout.h
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,14 @@ class ElementLayout {
/// The offset in bytes from the start of the struct.
unsigned ByteOffset;

/// The offset in bytes from the start of the struct, except EmptyFields are
/// placed at the current byte offset instead of 0. For the purpose of the
final layout, empty fields are placed at offset 0; that, however, creates a
/// whole slew of special cases to deal with. Instead of dealing with these
/// special cases during layout, we pretend that empty fields are placed
/// just like any other field at the current offset.
unsigned ByteOffsetForLayout;

/// The index of this element, either in the LLVM struct (if fixed)
/// or in the non-fixed elements array (if non-fixed).
unsigned Index : 28;
Expand Down Expand Up @@ -142,27 +150,31 @@ class ElementLayout {
TheKind = other.TheKind;
IsPOD = other.IsPOD;
ByteOffset = other.ByteOffset;
ByteOffsetForLayout = other.ByteOffsetForLayout;
Index = other.Index;
}

void completeEmpty(IsPOD_t isPOD) {
void completeEmpty(IsPOD_t isPOD, Size byteOffset) {
TheKind = unsigned(Kind::Empty);
IsPOD = unsigned(isPOD);
ByteOffset = 0;
ByteOffsetForLayout = byteOffset.getValue();
Index = 0; // make a complete write of the bitfield
}

void completeInitialNonFixedSize(IsPOD_t isPOD) {
TheKind = unsigned(Kind::InitialNonFixedSize);
IsPOD = unsigned(isPOD);
ByteOffset = 0;
ByteOffsetForLayout = ByteOffset;
Index = 0; // make a complete write of the bitfield
}

void completeFixed(IsPOD_t isPOD, Size byteOffset, unsigned structIndex) {
TheKind = unsigned(Kind::Fixed);
IsPOD = unsigned(isPOD);
ByteOffset = byteOffset.getValue();
ByteOffsetForLayout = ByteOffset;
Index = structIndex;

assert(getByteOffset() == byteOffset);
Expand All @@ -172,6 +184,7 @@ class ElementLayout {
TheKind = unsigned(Kind::EmptyTailAllocatedCType);
IsPOD = unsigned(isPOD);
ByteOffset = byteOffset.getValue();
ByteOffsetForLayout = ByteOffset;
Index = 0;

assert(getByteOffset() == byteOffset);
Expand Down Expand Up @@ -228,6 +241,17 @@ class ElementLayout {
return Size(ByteOffset);
}

/// The offset in bytes from the start of the struct, except EmptyFields are
/// placed at the current byte offset instead of 0. For the purpose of the
final layout, empty fields are placed at offset 0; that, however, creates a
/// whole slew of special cases to deal with. Instead of dealing with these
/// special cases during layout, we pretend that empty fields are placed
/// just like any other field at the current offset.
Size getByteOffsetDuringLayout() const {
assert(isCompleted() && hasByteOffset());
return Size(ByteOffsetForLayout);
}

/// Given that this element has a fixed offset, return the index in
/// the LLVM struct.
unsigned getStructIndex() const {
Expand Down
79 changes: 79 additions & 0 deletions test/IRGen/partial_apply.sil
Original file line number Diff line number Diff line change
Expand Up @@ -767,3 +767,82 @@ bb0(%x : $*ResilientInt, %y : $SwiftClass):
%t = tuple()
return %t : $()
}

protocol Proto1 {}
protocol Proto2 {}
struct EmptyType : Proto1 { }

struct SomeType : Proto2 {
var d : ResilientInt // some resilient type
var x : Int
}

sil @foo : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()

// CHECK-64-LABEL: define{{.*}} swiftcc void @empty_followed_by_non_fixed(%T13partial_apply8SomeTypeV* noalias nocapture %0)
// CHECK-64: [[FLAGS:%.*]] = load i32, i32*
// CHECK-64: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
// CHECK-64: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
// CHECK-64: [[NOTALIGNMASK:%.*]] = xor i64 [[ALIGNMASK]], -1
// Make sure we take the header offset (16) into account.
// CHECK-64: [[TMP:%.*]] = add i64 16, [[ALIGNMASK]]
// CHECK-64: [[OFFSET:%.*]] = and i64 [[TMP]], [[NOTALIGNMASK]]
// CHECK-64: [[CONTEXT:%.*]] = call noalias %swift.refcounted* @swift_allocObject
// CHECK-64: [[CAST:%.*]] = bitcast %swift.refcounted* [[CONTEXT]] to <{ %swift.refcounted }>*
// CHECK-64: [[CAST2:%.*]] = bitcast <{ %swift.refcounted }>* [[CAST]] to i8*
// CHECK-64: [[GEP:%.*]] = getelementptr inbounds i8, i8* [[CAST2]], i64 [[OFFSET]]
// CHECK-64: [[CAST3:%.*]] = bitcast i8* [[GEP]] to %T13partial_apply8SomeTypeV*
// CHECK-64: call %T13partial_apply8SomeTypeV* @"$s13partial_apply8SomeTypeVWOb"(%T13partial_apply8SomeTypeV* {{.*}}, %T13partial_apply8SomeTypeV* [[CAST3]])

sil @empty_followed_by_non_fixed : $@convention(thin) (EmptyType, @in_guaranteed SomeType) -> () {
entry(%0 : $EmptyType, %1: $*SomeType):
%5 = alloc_stack $EmptyType
store %0 to %5 : $*EmptyType
%31 = function_ref @foo : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()
%32 = alloc_stack $EmptyType
copy_addr %5 to [initialization] %32 : $*EmptyType
%34 = alloc_stack $SomeType
copy_addr %1 to [initialization] %34 : $*SomeType // id: %35
%36 = partial_apply [callee_guaranteed] %31<EmptyType, SomeType>(%32, %34) : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()
release_value %36: $@callee_guaranteed () ->()
dealloc_stack %34 : $*SomeType
dealloc_stack %32 : $*EmptyType
dealloc_stack %5 : $*EmptyType
%40 = tuple()
return %40 : $()
}

struct FixedType {
var f: Int32
}
// CHECK-64-LABEL: define{{.*}} swiftcc void @fixed_followed_by_empty_followed_by_non_fixed
// CHECK-64-NOT: ret
// CHECK-64: [[FLAGS:%.*]] = load i32, i32*
// CHECK-64: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
// CHECK-64: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
// CHECK-64: [[NOTALIGNMASK:%.*]] = xor i64 [[ALIGNMASK]], -1
// Make sure we compute the correct offset of the non-fixed field.
// CHECK-64: [[TMP:%.*]] = add i64 20, [[ALIGNMASK]]
// CHECK-64: ret

sil @foo2 : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
sil @fixed_followed_by_empty_followed_by_non_fixed : $@convention(thin) (EmptyType, @in_guaranteed SomeType, FixedType) -> () {
entry(%0 : $EmptyType, %1: $*SomeType, %3: $FixedType):
%5 = alloc_stack $EmptyType
store %0 to %5 : $*EmptyType
%7 = alloc_stack $FixedType
store %3 to %7 : $*FixedType
%31 = function_ref @foo2 : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
%32 = alloc_stack $EmptyType
copy_addr %5 to [initialization] %32 : $*EmptyType
%34 = alloc_stack $SomeType
copy_addr %1 to [initialization] %34 : $*SomeType // id: %35
%36 = partial_apply [callee_guaranteed] %31<FixedType, EmptyType, SomeType>(%7, %32, %34) : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
release_value %36: $@callee_guaranteed () ->()
dealloc_stack %34 : $*SomeType
dealloc_stack %32 : $*EmptyType
dealloc_stack %7 : $*FixedType
dealloc_stack %5 : $*EmptyType
%40 = tuple()
return %40 : $()
}