Commit dbca463
[5.3] IRGen: Correctly compute the offset for a non-fixed field after an empty field

Introduce getByteOffsetDuringLayout() so that we can treat empty fields just like other fixed-size fields while performing layout.

Explanation: We used to incorrectly compute the offset of a non-fixed field that follows an empty field inside a closure capture context. This led to crashes at runtime because we read memory at the wrong offset.
Scope: Causes Swift programs that capture empty fields alongside non-fixed fields to crash or misbehave.
Risk: Medium; we now correctly compute the offset in such cases.
Testing: Regression test added.
Reviewer: Slava Pestov

rdar://62349871
1 parent deefd5e
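In concrete terms: when IRGen lays out a closure capture context, the offset of a non-fixed (runtime-sized) field is computed by starting from the byte offset of the preceding field and aligning up. Empty fields receive a final offset of 0, so using that offset as the starting point made the runtime computation restart at the very beginning of the heap object, in front of the header. A minimal C++ sketch of the arithmetic, with hypothetical names (alignUp, headerSize) standing in for the real logic in lib/IRGen/GenHeap.cpp:

    #include <cassert>
    #include <cstdint>

    // Round `offset` up to the next multiple of (alignMask + 1).
    static uint64_t alignUp(uint64_t offset, uint64_t alignMask) {
      return (offset + alignMask) & ~alignMask;
    }

    int main() {
      const uint64_t headerSize = 16; // heap-object header on 64-bit targets
      const uint64_t alignMask = 7;   // assume an 8-byte-aligned payload

      // Buggy: the empty field's final offset is 0, so starting there
      // ignores everything laid out before it, including the header.
      assert(alignUp(/*lastFixedOffset=*/0, alignMask) == 0);

      // Fixed: during layout the empty field carries the current offset
      // (here, right after the header), so the next field lands correctly.
      assert(alignUp(/*lastFixedOffset=*/headerSize, alignMask) == 16);
      return 0;
    }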

File tree: 4 files changed (+107, -3 lines)

lib/IRGen/GenHeap.cpp

Lines changed: 1 addition & 1 deletion

@@ -292,7 +292,7 @@ static llvm::Value *calcInitOffset(swift::irgen::IRGenFunction &IGF,
     auto &prevElt = layout.getElement(i - 1);
     auto prevType = layout.getElementTypes()[i - 1];
     // Start calculating offsets from the last fixed-offset field.
-    Size lastFixedOffset = layout.getElement(i - 1).getByteOffset();
+    Size lastFixedOffset = layout.getElement(i - 1).getByteOffsetDuringLayout();
     if (auto *fixedType = dyn_cast<FixedTypeInfo>(&prevElt.getType())) {
       // If the last fixed-offset field is also fixed-size, we can
      // statically compute the end of the fixed-offset fields.
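For context, calcInitOffset computes where the first non-fixed field begins: it takes the previous element's offset and, depending on whether that element's size is statically known, either folds the end of the fixed prefix into a constant or emits runtime size-and-align arithmetic. A condensed, hypothetical paraphrase of that decision (simplified scalar types, not the compiler's actual llvm::Value-based signatures):

    #include <cstdint>

    // End of the fixed-offset prefix, aligned up for the next field.
    // `lastIsFixedSize` selects between the statically known size and a
    // size obtained from type metadata at runtime.
    uint64_t startOfNonFixed(uint64_t lastFixedOffset, bool lastIsFixedSize,
                             uint64_t staticSize, uint64_t dynamicSize,
                             uint64_t alignMask) {
      uint64_t end =
          lastFixedOffset + (lastIsFixedSize ? staticSize : dynamicSize);
      return (end + alignMask) & ~alignMask; // align up
    }

The fix is entirely in the first argument: with getByteOffset(), an empty previous element supplied lastFixedOffset == 0; getByteOffsetDuringLayout() supplies the offset the element occupied while layout was in progress.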

lib/IRGen/StructLayout.cpp

Lines changed: 2 additions & 1 deletion

@@ -312,7 +312,8 @@ void StructLayoutBuilder::addNonFixedSizeElement(ElementLayout &elt) {

 /// Add an empty element to the aggregate.
 void StructLayoutBuilder::addEmptyElement(ElementLayout &elt) {
-  elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal));
+  auto byteOffset = isFixedLayout() ? CurSize : Size(0);
+  elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal), byteOffset);
 }

 /// Add an element at the fixed offset of the current end of the
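The builder change records where the empty element sits during layout: in fixed layout that is the running size CurSize, while in non-fixed layout no meaningful offset exists yet, hence Size(0). A toy sketch of the two-offset idea (simplified stand-ins, not the real ElementLayout/StructLayoutBuilder classes):

    #include <cstdint>

    // Each element keeps the final access offset and the offset layout
    // pretends it has while subsequent fields are being placed.
    struct ToyElement {
      uint64_t byteOffset;          // final offset; 0 for empty fields
      uint64_t byteOffsetForLayout; // current end of struct at placement
    };

    struct ToyLayoutBuilder {
      uint64_t curSize = 0;

      ToyElement addFixed(uint64_t size) {
        ToyElement e{curSize, curSize};
        curSize += size;
        return e;
      }

      // Empty fields occupy no storage: the final offset stays 0, but the
      // layout-time offset is the current end of the struct, so a field
      // placed after it starts from the right position.
      ToyElement addEmpty() { return ToyElement{0, curSize}; }
    };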

lib/IRGen/StructLayout.h

Lines changed: 25 additions & 1 deletion

@@ -114,6 +114,14 @@ class ElementLayout {
   /// The offset in bytes from the start of the struct.
   unsigned ByteOffset;

+  /// The offset in bytes from the start of the struct, except EmptyFields are
+  /// placed at the current byte offset instead of 0. For the purpose of the
+  /// final layout empty fields are placed at offset 0, that however creates a
+  /// whole slew of special cases to deal with. Instead of dealing with these
+  /// special cases during layout, we pretend that empty fields are placed
+  /// just like any other field at the current offset.
+  unsigned ByteOffsetForLayout;
+
   /// The index of this element, either in the LLVM struct (if fixed)
   /// or in the non-fixed elements array (if non-fixed).
   unsigned Index : 28;
@@ -142,27 +150,31 @@ class ElementLayout {
     TheKind = other.TheKind;
     IsPOD = other.IsPOD;
     ByteOffset = other.ByteOffset;
+    ByteOffsetForLayout = other.ByteOffsetForLayout;
     Index = other.Index;
   }

-  void completeEmpty(IsPOD_t isPOD) {
+  void completeEmpty(IsPOD_t isPOD, Size byteOffset) {
     TheKind = unsigned(Kind::Empty);
     IsPOD = unsigned(isPOD);
     ByteOffset = 0;
+    ByteOffsetForLayout = byteOffset.getValue();
     Index = 0; // make a complete write of the bitfield
   }

   void completeInitialNonFixedSize(IsPOD_t isPOD) {
     TheKind = unsigned(Kind::InitialNonFixedSize);
     IsPOD = unsigned(isPOD);
     ByteOffset = 0;
+    ByteOffsetForLayout = ByteOffset;
     Index = 0; // make a complete write of the bitfield
   }

   void completeFixed(IsPOD_t isPOD, Size byteOffset, unsigned structIndex) {
     TheKind = unsigned(Kind::Fixed);
     IsPOD = unsigned(isPOD);
     ByteOffset = byteOffset.getValue();
+    ByteOffsetForLayout = ByteOffset;
     Index = structIndex;

     assert(getByteOffset() == byteOffset);
@@ -172,6 +184,7 @@ class ElementLayout {
     TheKind = unsigned(Kind::EmptyTailAllocatedCType);
     IsPOD = unsigned(isPOD);
     ByteOffset = byteOffset.getValue();
+    ByteOffsetForLayout = ByteOffset;
     Index = 0;

     assert(getByteOffset() == byteOffset);
@@ -228,6 +241,17 @@ class ElementLayout {
     return Size(ByteOffset);
   }

+  /// The offset in bytes from the start of the struct, except EmptyFields are
+  /// placed at the current byte offset instead of 0. For the purpose of the
+  /// final layout empty fields are placed at offset 0, that however creates a
+  /// whole slew of special cases to deal with. Instead of dealing with these
+  /// special cases during layout, we pretend that empty fields are placed
+  /// just like any other field at the current offset.
+  Size getByteOffsetDuringLayout() const {
+    assert(isCompleted() && hasByteOffset());
+    return Size(ByteOffsetForLayout);
+  }
+
   /// Given that this element has a fixed offset, return the index in
   /// the LLVM struct.
   unsigned getStructIndex() const {

test/IRGen/partial_apply.sil

Lines changed: 79 additions & 0 deletions

@@ -767,3 +767,82 @@ bb0(%x : $*ResilientInt, %y : $SwiftClass):
   %t = tuple()
   return %t : $()
 }
+
+protocol Proto1 {}
+protocol Proto2 {}
+struct EmptyType : Proto1 { }
+
+struct SomeType : Proto2 {
+  var d : ResilientInt // some resilient type
+  var x : Int
+}
+
+sil @foo : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()
+
+// CHECK-64-LABEL: define{{.*}} swiftcc void @empty_followed_by_non_fixed(%T13partial_apply8SomeTypeV* noalias nocapture %0)
+// CHECK-64: [[FLAGS:%.*]] = load i32, i32*
+// CHECK-64: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
+// CHECK-64: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
+// CHECK-64: [[NOTALIGNMASK:%.*]] = xor i64 [[ALIGNMASK]], -1
+// Make sure we take the header offset (16) into account.
+// CHECK-64: [[TMP:%.*]] = add i64 16, [[ALIGNMASK]]
+// CHECK-64: [[OFFSET:%.*]] = and i64 [[TMP]], [[NOTALIGNMASK]]
+// CHECK-64: [[CONTEXT:%.*]] = call noalias %swift.refcounted* @swift_allocObject
+// CHECK-64: [[CAST:%.*]] = bitcast %swift.refcounted* [[CONTEXT]] to <{ %swift.refcounted }>*
+// CHECK-64: [[CAST2:%.*]] = bitcast <{ %swift.refcounted }>* [[CAST]] to i8*
+// CHECK-64: [[GEP:%.*]] = getelementptr inbounds i8, i8* [[CAST2]], i64 [[OFFSET]]
+// CHECK-64: [[CAST3:%.*]] = bitcast i8* [[GEP]] to %T13partial_apply8SomeTypeV*
+// CHECK-64: call %T13partial_apply8SomeTypeV* @"$s13partial_apply8SomeTypeVWOb"(%T13partial_apply8SomeTypeV* {{.*}}, %T13partial_apply8SomeTypeV* [[CAST3]])
+
+sil @empty_followed_by_non_fixed : $@convention(thin) (EmptyType, @in_guaranteed SomeType) -> () {
+entry(%0 : $EmptyType, %1 : $*SomeType):
+  %5 = alloc_stack $EmptyType
+  store %0 to %5 : $*EmptyType
+  %31 = function_ref @foo : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()
+  %32 = alloc_stack $EmptyType
+  copy_addr %5 to [initialization] %32 : $*EmptyType
+  %34 = alloc_stack $SomeType
+  copy_addr %1 to [initialization] %34 : $*SomeType // id: %35
+  %36 = partial_apply [callee_guaranteed] %31<EmptyType, SomeType>(%32, %34) : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : Proto1, τ_0_1 : Proto2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> ()
+  release_value %36 : $@callee_guaranteed () -> ()
+  dealloc_stack %34 : $*SomeType
+  dealloc_stack %32 : $*EmptyType
+  dealloc_stack %5 : $*EmptyType
+  %40 = tuple()
+  return %40 : $()
+}
+
+struct FixedType {
+  var f: Int32
+}
+
+// CHECK-64-LABEL: define{{.*}} swiftcc void @fixed_followed_by_empty_followed_by_non_fixed
+// CHECK-64-NOT: ret
+// CHECK-64: [[FLAGS:%.*]] = load i32, i32*
+// CHECK-64: [[FLAGS2:%.*]] = zext i32 [[FLAGS]] to i64
+// CHECK-64: [[ALIGNMASK:%.*]] = and i64 [[FLAGS2]], 255
+// CHECK-64: [[NOTALIGNMASK:%.*]] = xor i64 [[ALIGNMASK]], -1
+// Make sure we compute the correct offset of the non-fixed field.
+// CHECK-64: [[TMP:%.*]] = add i64 20, [[ALIGNMASK]]
+// CHECK-64: ret
+
+sil @foo2 : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
+
+sil @fixed_followed_by_empty_followed_by_non_fixed : $@convention(thin) (EmptyType, @in_guaranteed SomeType, FixedType) -> () {
+entry(%0 : $EmptyType, %1 : $*SomeType, %3 : $FixedType):
+  %5 = alloc_stack $EmptyType
+  store %0 to %5 : $*EmptyType
+  %7 = alloc_stack $FixedType
+  store %3 to %7 : $*FixedType
+  %31 = function_ref @foo2 : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
+  %32 = alloc_stack $EmptyType
+  copy_addr %5 to [initialization] %32 : $*EmptyType
+  %34 = alloc_stack $SomeType
+  copy_addr %1 to [initialization] %34 : $*SomeType // id: %35
+  %36 = partial_apply [callee_guaranteed] %31<FixedType, EmptyType, SomeType>(%7, %32, %34) : $@convention(thin) <τ_0_0, τ_0_1, τ_0_2> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1, @in_guaranteed τ_0_2) -> ()
+  release_value %36 : $@callee_guaranteed () -> ()
+  dealloc_stack %34 : $*SomeType
+  dealloc_stack %32 : $*EmptyType
+  dealloc_stack %7 : $*FixedType
+  dealloc_stack %5 : $*EmptyType
+  %40 = tuple()
+  return %40 : $()
+}
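The constant in `add i64 20, [[ALIGNMASK]]` is where the second test expects the non-fixed SomeType capture to begin before runtime alignment: the 16-byte heap-object header plus the 4-byte FixedType (a single Int32), with the empty EmptyType capture contributing nothing. A quick check of that arithmetic (sizes assumed for a 64-bit target):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t header = 16;   // %swift.refcounted header, 64-bit
      const uint64_t fixedType = 4; // FixedType: a single Int32
      const uint64_t emptyType = 0; // EmptyType: no storage

      // Layout-time start of the SomeType capture, before the runtime
      // align-up that the CHECK lines verify.
      assert(header + fixedType + emptyType == 20);
      return 0;
    }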
